Rename lib/ceph to lib/charms_ceph
The new python3-ceph-common deb package (introduced with Ceph Octopus) adds a ceph directory (a parent package in Python terms) at /usr/lib/python3/dist-packages/ceph/. This conflicts with charm-ceph-osd/lib/ceph/: with the current import of ceph.utils in hooks/ceph_hooks.py, Python resolves ceph to the system package, finds no utils.py there, and stops searching. Therefore, rename lib/ceph to lib/charms_ceph to avoid the conflict.

Depends-On: https://review.opendev.org/#/c/709226
Change-Id: I13ae7c048d8f1eef2ea64b13ae14b51dbfaaf3cd
parent 81a11f4d7f
commit fcfa499f11
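To illustrate the conflict described in the commit message, below is a minimal sketch (not part of the change itself) of how the old import breaks once python3-ceph-common is installed. It assumes it is run from the charm root, where lib/ holds the bundled packages; the paths and behaviour on a host without that deb are only illustrative.

#!/usr/bin/env python3
# Sketch of the namespace clash this change avoids. Assumes a host with
# python3-ceph-common installed and the charm's bundled libraries in ./lib
# (hypothetical layout, for illustration only).
import importlib.util
import sys

# The charm appends 'lib' to sys.path, so the copy of the 'ceph' package in
# system site-packages is found first.
sys.path.append('lib')

spec = importlib.util.find_spec('ceph')
if spec is not None:
    # On such a host this is typically ['/usr/lib/python3/dist-packages/ceph'],
    # i.e. the charm's lib/ceph is shadowed entirely.
    print('ceph resolves to:', list(spec.submodule_search_locations or []))

try:
    # On such a host this fails: the system 'ceph' package ships no utils
    # module, and Python does not fall back to lib/ceph/utils.py once 'ceph'
    # has been resolved.
    import ceph.utils  # noqa: F401
except ImportError as exc:
    print('old import breaks:', exc)

# After the rename the charm package has a unique top-level name, so there is
# nothing left to shadow:
#     import charms_ceph.utils

The diff below applies that rename mechanically: every ceph.utils / ceph.crush_utils reference becomes charms_ceph.utils / charms_ceph.crush_utils, with continuation lines re-indented where the longer name requires it.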
@@ -27,19 +27,19 @@ import charmhelpers.core.hookenv as hookenv
 from charmhelpers.core.unitdata import kv
 
 import ceph_hooks
-import ceph.utils
+import charms_ceph.utils
 
 
 def add_device(request, device_path, bucket=None):
-    ceph.utils.osdize(device_path, hookenv.config('osd-format'),
-                      ceph_hooks.get_journal_devices(),
-                      hookenv.config('ignore-device-errors'),
-                      hookenv.config('osd-encrypt'),
-                      hookenv.config('bluestore'),
-                      hookenv.config('osd-encrypt-keymanager'))
+    charms_ceph.utils.osdize(device_path, hookenv.config('osd-format'),
+                             ceph_hooks.get_journal_devices(),
+                             hookenv.config('ignore-device-errors'),
+                             hookenv.config('osd-encrypt'),
+                             hookenv.config('bluestore'),
+                             hookenv.config('osd-encrypt-keymanager'))
     # Make it fast!
     if hookenv.config('autotune'):
-        ceph.utils.tune_dev(device_path)
+        charms_ceph.utils.tune_dev(device_path)
     mounts = filter(lambda disk: device_path
                     in disk.device, psutil.disk_partitions())
     for osd in mounts:
@@ -36,7 +36,7 @@ sys.path.append('lib/')
 
 import charmhelpers.core.hookenv as hookenv
 
-import ceph.utils
+import charms_ceph.utils
 import utils
 
 
@@ -46,13 +46,15 @@ def list_disk():
     for journal in utils.get_journal_devices():
         osd_journal.append(os.path.realpath(journal))
 
-    for dev in list(set(ceph.utils.unmounted_disks()) - set(osd_journal)):
-        if (not ceph.utils.is_active_bluestore_device(dev) and
-                not ceph.utils.is_pristine_disk(dev)):
+    for dev in list(set(charms_ceph.utils.unmounted_disks()) -
+                    set(osd_journal)):
+        if (not charms_ceph.utils.is_active_bluestore_device(dev) and
+                not charms_ceph.utils.is_pristine_disk(dev)):
             non_pristine.append(dev)
 
     hookenv.action_set({
-        'disks': list(set(ceph.utils.unmounted_disks()) - set(osd_journal)),
+        'disks': list(set(charms_ceph.utils.unmounted_disks()) -
+                      set(osd_journal)),
         'blacklist': utils.get_blacklist(),
         'non-pristine': non_pristine,
     })
@@ -27,7 +27,7 @@ from charmhelpers.core.hookenv import (
     action_fail,
 )
 
-from ceph.utils import get_local_osd_ids
+from charms_ceph.utils import get_local_osd_ids
 from ceph_hooks import assess_status
 
 
@@ -27,8 +27,8 @@ from charmhelpers.contrib.storage.linux.utils import (
     zap_disk,
 )
 from charmhelpers.core.unitdata import kv
-from ceph.utils import is_active_bluestore_device
-from ceph.utils import is_mapped_luks_device
+from charms_ceph.utils import is_active_bluestore_device
+from charms_ceph.utils import is_mapped_luks_device
 
 
 def get_devices():
@@ -25,7 +25,7 @@ import subprocess
 import sys
 
 sys.path.append('lib')
-import ceph.utils as ceph
+import charms_ceph.utils as ceph
 from charmhelpers.core import hookenv
 from charmhelpers.core.hookenv import (
     log,
@@ -19,7 +19,7 @@ import subprocess
 import sys
 
 sys.path.append('lib')
-import ceph.utils as ceph
+import charms_ceph.utils as ceph
 
 from charmhelpers.core.hookenv import (
     unit_get,
@@ -18,11 +18,11 @@ import os
 
 from tempfile import NamedTemporaryFile
 
-from ceph.utils import (
+from charms_ceph.utils import (
     get_cephfs,
     get_osd_weight
 )
-from ceph.crush_utils import Crushmap
+from charms_ceph.crush_utils import Crushmap
 
 from charmhelpers.core.hookenv import (
     log,
@@ -26,7 +26,7 @@ class AddDiskActionTests(CharmTestCase):
         self.kv.return_value = self.kv
 
     @mock.patch.object(add_disk.ceph_hooks, 'get_journal_devices')
-    @mock.patch.object(add_disk.ceph.utils, 'osdize')
+    @mock.patch.object(add_disk.charms_ceph.utils, 'osdize')
    def test_add_device(self, mock_osdize, mock_get_journal_devices):
 
         def fake_config(key):
@@ -7,16 +7,17 @@ class ListDisksActionTests(CharmTestCase):
     def setUp(self):
         super(ListDisksActionTests, self).setUp(
             list_disks, ['hookenv',
-                         'ceph',
+                         'charms_ceph',
                          'utils',
                          'os'])
-        self.ceph.utils.unmounted_disks.return_value = ['/dev/sda', '/dev/sdm']
+        self.charms_ceph.utils.unmounted_disks.return_value = ['/dev/sda',
+                                                               '/dev/sdm']
 
     def test_list_disks_journal_symbol_link(self):
         self.utils.get_journal_devices.return_value = {'/dev/disk/ceph/sdm'}
         self.os.path.realpath.return_value = '/dev/sdm'
-        self.ceph.utils.is_active_bluestore_device.return_value = False
-        self.ceph.utils.is_pristine_disk.return_value = False
+        self.charms_ceph.utils.is_active_bluestore_device.return_value = False
+        self.charms_ceph.utils.is_pristine_disk.return_value = False
         self.utils.get_blacklist.return_value = []
         list_disks.list_disk()
         self.hookenv.action_set.assert_called_with({
@@ -1,7 +1,7 @@
 __author__ = 'Chris Holcombe <chris.holcombe@canonical.com>'
 from mock import patch, call
 import test_utils
-import ceph.utils as ceph
+import charms_ceph.utils as ceph
 
 TO_PATCH = [
     'hookenv',