Remove FileStore support
Remove support for creating FileStore OSDs. Also prevent upgrade
attempts to Reef if a FileStore OSD is detected.

Change-Id: I9609bc0222365cb1f4059312b466a12ef4e0397f
commit 1bac66ee50
parent ceb8187284
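The upgrade guard works by inspecting the on-disk 'type' marker that every OSD
data directory carries. A minimal standalone sketch of the same check (the
function name here is illustrative; the path and the 'filestore' marker match
the find_filestore_osds helper added further down):

    import os

    def list_filestore_osds(osd_path='/var/lib/ceph/osd'):
        # Collect OSD dirs whose 'type' file identifies a FileStore backend.
        found = []
        for d in os.listdir(osd_path):
            full = os.path.join(osd_path, d)
            if not (d.startswith('ceph-') and os.path.isdir(full)):
                continue
            with open(os.path.join(full, 'type'), 'r') as f:
                if 'filestore' in f.read():
                    found.append(d)
        return found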
@@ -115,7 +115,6 @@ def add_device(request, device_path, bucket=None,
                                       ceph_hooks.get_journal_devices(),
                                       hookenv.config('ignore-device-errors'),
                                       hookenv.config('osd-encrypt'),
-                                      charms_ceph.utils.use_bluestore(),
                                       hookenv.config('osd-encrypt-keymanager'),
                                       osd_id)
 
@@ -90,15 +90,6 @@ options:
       where the specified journal device does not exist on a node.
       .
       Only supported with ceph >= 0.48.3.
-  bluestore:
-    type: boolean
-    default: True
-    description: |
-      Enable BlueStore storage backend for OSD devices.
-      .
-      Only supported with ceph >= 12.2.0.
-      .
-      Setting to 'False' will use FileStore as the storage format.
   bluestore-wal:
     type: string
     default:
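For context, the removed option is what deployments used to opt out of
BlueStore. A bundle stanza along these lines (illustrative syntax) is what can
no longer request FileStore once the option is gone:

    ceph-osd:
      charm: ch:ceph-osd
      options:
        bluestore: false   # removed by this change; FileStore can no longer be requested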
@@ -26,6 +26,8 @@ import subprocess
 import sys
 import traceback
 
+import utils
+
 sys.path.append('lib')
 import charms_ceph.utils as ceph
 from charmhelpers.core import hookenv
@@ -147,6 +149,14 @@ def check_for_upgrade():
                                    'distro')
     new_version_os = get_os_codename_install_source(hookenv.config('source'))
 
+    # If the new version is reef and we detect that we are running FileStore
+    # OSDs, bail out with an error message.
+    filestore_osds = utils.find_filestore_osds()
+    if new_version == 'reef' and filestore_osds:
+        log("Refuse to upgrade to reef with FileStore OSDs present: {}".format(
+            filestore_osds), level=ERROR)
+        return
+
     # May be in a previous upgrade that was failed if the directories
     # still need an ownership update. Check this condition.
     resuming_upgrade = ceph.dirs_need_ownership_update('osd')
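When the guard trips, check_for_upgrade() logs at ERROR and returns before any
rolling upgrade starts. Given the format string above and a single legacy OSD,
the unit log would carry something like:

    Refuse to upgrade to reef with FileStore OSDs present: ['ceph-0']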
@@ -464,7 +474,6 @@ def get_ceph_context(upgrading=False):
         'dio': str(config('use-direct-io')).lower(),
         'short_object_len': use_short_objects(),
         'upgrade_in_progress': upgrading,
-        'bluestore': ceph.use_bluestore(),
         'bluestore_experimental': cmp_pkgrevno('ceph', '12.1.0') < 0,
         'bluestore_block_wal_size': config('bluestore-block-wal-size'),
         'bluestore_block_db_size': config('bluestore-block-db-size'),
@@ -619,13 +628,11 @@ def prepare_disks_and_activate():
         log('ceph bootstrapped, rescanning disks')
         emit_cephconf()
         ceph.udevadm_settle()
-        bluestore = ceph.use_bluestore()
         for dev in get_devices():
             ceph.osdize(dev, config('osd-format'),
                         osd_journal,
                         config('ignore-device-errors'),
                         config('osd-encrypt'),
-                        bluestore,
                         config('osd-encrypt-keymanager'))
         # Make it fast!
         if config('autotune'):
@@ -692,3 +692,26 @@ def get_parent_device(dev):
             return '/dev/' + child['name']
 
     return dev
+
+
+def find_filestore_osds():
+    # Path to Ceph OSD
+    osd_path = '/var/lib/ceph/osd'
+
+    # Search through OSD directories in path starting with 'ceph-'
+    dirs = [d for d in os.listdir(osd_path)
+            if d.startswith('ceph-')
+            and os.path.isdir(os.path.join(osd_path, d))]
+
+    found = []
+    for dir in dirs:
+        # Construct the full path
+        type_file_path = os.path.join(osd_path, dir, 'type')
+        # Open and read the type file
+        with open(type_file_path, 'r') as f:
+            content = f.read()
+            # Check if the content includes 'filestore'
+            if 'filestore' in content:
+                found.append(dir)
+
+    return found
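Note that the helper returns bare directory names, not full paths, and it
assumes every ceph-* directory has a readable 'type' file (a missing file
would raise OSError). An illustrative call on a host with one legacy OSD:

    >>> import utils
    >>> utils.find_filestore_osds()
    ['ceph-0']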
@@ -1324,16 +1324,6 @@ def systemd():
     return CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'vivid'
 
 
-def use_bluestore():
-    """Determine whether bluestore should be used for OSD's
-
-    :returns: whether bluestore disk format should be used
-    :rtype: bool"""
-    if cmp_pkgrevno('ceph', '12.2.0') < 0:
-        return False
-    return config('bluestore')
-
-
 def bootstrap_monitor_cluster(secret):
     """Bootstrap local Ceph mon into the Ceph cluster
 
@@ -1551,21 +1541,21 @@ def get_devices(name):
 
 
 def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False,
-           bluestore=False, key_manager=CEPH_KEY_MANAGER, osd_id=None):
+           key_manager=CEPH_KEY_MANAGER, osd_id=None):
     if dev.startswith('/dev'):
         osdize_dev(dev, osd_format, osd_journal,
                    ignore_errors, encrypt,
-                   bluestore, key_manager, osd_id)
+                   key_manager, osd_id)
     else:
         if cmp_pkgrevno('ceph', '14.0.0') >= 0:
             log("Directory backed OSDs can not be created on Nautilus",
                 level=WARNING)
             return
-        osdize_dir(dev, encrypt, bluestore)
+        osdize_dir(dev, encrypt)
 
 
 def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False,
-               encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER,
+               encrypt=False, key_manager=CEPH_KEY_MANAGER,
                osd_id=None):
     """
     Prepare a block device for use as a Ceph OSD
@@ -1579,7 +1569,6 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False,
     :param: ignore_errors: Don't fail in the event of any errors during
             processing
     :param: encrypt: Encrypt block devices using 'key_manager'
-    :param: bluestore: Use bluestore native Ceph block device format
     :param: key_manager: Key management approach for encryption keys
     :raises subprocess.CalledProcessError: in the event that any supporting
             subprocess operation failed
@@ -1630,15 +1619,13 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False,
         cmd = _ceph_volume(dev,
                            osd_journal,
                            encrypt,
-                           bluestore,
                            key_manager,
                            osd_id)
     else:
         cmd = _ceph_disk(dev,
                          osd_format,
                          osd_journal,
-                         encrypt,
-                         bluestore)
+                         encrypt)
 
     try:
         status_set('maintenance', 'Initializing device {}'.format(dev))
@@ -1669,7 +1656,7 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False,
             db.flush()
 
 
-def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False):
+def _ceph_disk(dev, osd_format, osd_journal, encrypt=False):
     """
     Prepare a device for usage as a Ceph OSD using ceph-disk
 
@@ -1677,7 +1664,6 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False):
                 The function looks up realpath of the device
     :param: osd_journal: List of block devices to use for OSD journals
     :param: encrypt: Use block device encryption (unsupported)
-    :param: bluestore: Use bluestore storage for OSD
     :returns: list. 'ceph-disk' command and required parameters for
               execution by check_call
     """
@@ -1686,25 +1672,17 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False):
     if encrypt:
         cmd.append('--dmcrypt')
 
-    if osd_format and not bluestore:
-        cmd.append('--fs-type')
-        cmd.append(osd_format)
-
-    # NOTE(jamespage): enable experimental bluestore support
-    if use_bluestore():
-        cmd.append('--bluestore')
-        wal = get_devices('bluestore-wal')
-        if wal:
-            cmd.append('--block.wal')
-            least_used_wal = find_least_used_utility_device(wal)
-            cmd.append(least_used_wal)
-        db = get_devices('bluestore-db')
-        if db:
-            cmd.append('--block.db')
-            least_used_db = find_least_used_utility_device(db)
-            cmd.append(least_used_db)
-    elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore:
-        cmd.append('--filestore')
+    cmd.append('--bluestore')
+    wal = get_devices('bluestore-wal')
+    if wal:
+        cmd.append('--block.wal')
+        least_used_wal = find_least_used_utility_device(wal)
+        cmd.append(least_used_wal)
+    db = get_devices('bluestore-db')
+    if db:
+        cmd.append('--block.db')
+        least_used_db = find_least_used_utility_device(db)
+        cmd.append(least_used_db)
 
     cmd.append(os.path.realpath(dev))
 
@@ -1715,8 +1693,8 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False):
     return cmd
 
 
-def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False,
-                 key_manager=CEPH_KEY_MANAGER, osd_id=None):
+def _ceph_volume(dev, osd_journal, encrypt=False, key_manager=CEPH_KEY_MANAGER,
+                 osd_id=None):
     """
     Prepare and activate a device for usage as a Ceph OSD using ceph-volume.
 
@@ -1726,7 +1704,6 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False,
     :param: dev: Full path to use for OSD block device setup
     :param: osd_journal: List of block devices to use for OSD journals
     :param: encrypt: Use block device encryption
-    :param: bluestore: Use bluestore storage for OSD
     :param: key_manager: dm-crypt Key Manager to use
     :param: osd_id: The OSD-id to recycle, or None to create a new one
     :raises subprocess.CalledProcessError: in the event that any supporting
@@ -1739,13 +1716,8 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False,
     osd_fsid = str(uuid.uuid4())
     cmd.append('--osd-fsid')
     cmd.append(osd_fsid)
-    if bluestore:
-        cmd.append('--bluestore')
-        main_device_type = 'block'
-    else:
-        cmd.append('--filestore')
-        main_device_type = 'data'
+    cmd.append('--bluestore')
+    main_device_type = 'block'
 
     if encrypt and key_manager == CEPH_KEY_MANAGER:
         cmd.append('--dmcrypt')
@@ -1753,19 +1725,6 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False,
     if osd_id is not None:
         cmd.extend(['--osd-id', str(osd_id)])
 
-    # On-disk journal volume creation
-    if not osd_journal and not bluestore:
-        journal_lv_type = 'journal'
-        cmd.append('--journal')
-        cmd.append(_allocate_logical_volume(
-            dev=dev,
-            lv_type=journal_lv_type,
-            osd_fsid=osd_fsid,
-            size='{}M'.format(calculate_volume_size('journal')),
-            encrypt=encrypt,
-            key_manager=key_manager)
-        )
-
     cmd.append('--data')
     cmd.append(_allocate_logical_volume(dev=dev,
                                         lv_type=main_device_type,
@@ -1773,36 +1732,21 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False,
                                         encrypt=encrypt,
                                         key_manager=key_manager))
 
-    if bluestore:
-        for extra_volume in ('wal', 'db'):
-            devices = get_devices('bluestore-{}'.format(extra_volume))
-            if devices:
-                cmd.append('--block.{}'.format(extra_volume))
-                least_used = find_least_used_utility_device(devices,
-                                                            lvs=True)
-                cmd.append(_allocate_logical_volume(
-                    dev=least_used,
-                    lv_type=extra_volume,
-                    osd_fsid=osd_fsid,
-                    size='{}M'.format(calculate_volume_size(extra_volume)),
-                    shared=True,
-                    encrypt=encrypt,
-                    key_manager=key_manager)
-                )
-
-    elif osd_journal:
-        cmd.append('--journal')
-        least_used = find_least_used_utility_device(osd_journal,
-                                                    lvs=True)
-        cmd.append(_allocate_logical_volume(
-            dev=least_used,
-            lv_type='journal',
-            osd_fsid=osd_fsid,
-            size='{}M'.format(calculate_volume_size('journal')),
-            shared=True,
-            encrypt=encrypt,
-            key_manager=key_manager)
-        )
+    for extra_volume in ('wal', 'db'):
+        devices = get_devices('bluestore-{}'.format(extra_volume))
+        if devices:
+            cmd.append('--block.{}'.format(extra_volume))
+            least_used = find_least_used_utility_device(devices,
+                                                        lvs=True)
+            cmd.append(_allocate_logical_volume(
+                dev=least_used,
+                lv_type=extra_volume,
+                osd_fsid=osd_fsid,
+                size='{}M'.format(calculate_volume_size(extra_volume)),
+                shared=True,
+                encrypt=encrypt,
+                key_manager=key_manager)
+            )
 
     return cmd
 
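With the FileStore branch removed, _ceph_volume always passes '--bluestore'
and allocates the main logical volume as a 'block' device; the on-disk journal
path is gone entirely. A rough sketch of the argument list the helper now
produces, assuming the base invocation is 'ceph-volume lvm create' (the prefix
is built outside these hunks) and using placeholder volume names:

    # Illustrative only: the command prefix and LV naming are assumptions;
    # '--osd-fsid', '--bluestore' and '--data' follow from the hunks above.
    cmd = ['ceph-volume', 'lvm', 'create',
           '--osd-fsid', osd_fsid,           # freshly generated uuid4
           '--bluestore',                    # now unconditional
           '--data', '<vg-name>/<lv-name>']  # from _allocate_logical_volume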
@@ -2040,7 +1984,7 @@ def _allocate_logical_volume(dev, lv_type, osd_fsid,
     return "{}/{}".format(vg_name, lv_name)
 
 
-def osdize_dir(path, encrypt=False, bluestore=False):
+def osdize_dir(path, encrypt=False):
     """Ask ceph-disk to prepare a directory to become an OSD.
 
     :param path: str. The directory to osdize
|
|||||||
if cmp_pkgrevno('ceph', '0.60') >= 0:
|
if cmp_pkgrevno('ceph', '0.60') >= 0:
|
||||||
if encrypt:
|
if encrypt:
|
||||||
cmd.append('--dmcrypt')
|
cmd.append('--dmcrypt')
|
||||||
|
cmd.append('--bluestore')
|
||||||
|
|
||||||
# NOTE(icey): enable experimental bluestore support
|
|
||||||
if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore:
|
|
||||||
cmd.append('--bluestore')
|
|
||||||
elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore:
|
|
||||||
cmd.append('--filestore')
|
|
||||||
log("osdize dir cmd: {}".format(cmd))
|
log("osdize dir cmd: {}".format(cmd))
|
||||||
subprocess.check_call(cmd)
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
@@ -44,7 +44,7 @@ osd crush initial weight = {{ crush_initial_weight }}
 {% endfor %}
 {% endif %}
 
-{% if bluestore_experimental and bluestore -%}
+{% if bluestore_experimental -%}
 enable experimental unrecoverable data corrupting features = bluestore rocksdb
 {%- endif %}
 
@@ -66,7 +66,6 @@ keyring = /var/lib/ceph/mds/$cluster-$id/keyring
 [osd]
 keyring = /var/lib/ceph/osd/$cluster-$id/keyring
 
-{% if bluestore -%}
 {% if not bluestore_experimental -%}
 osd objectstore = bluestore
 {%- endif %}
|
|||||||
bluestore block db size = {{ bluestore_block_db_size }}
|
bluestore block db size = {{ bluestore_block_db_size }}
|
||||||
{%- endif %}
|
{%- endif %}
|
||||||
{% include 'section-ceph-bluestore-compression' %}
|
{% include 'section-ceph-bluestore-compression' %}
|
||||||
{%- else %}
|
|
||||||
osd journal size = {{ osd_journal_size }}
|
|
||||||
filestore xattr use omap = true
|
|
||||||
journal dio = {{ dio }}
|
|
||||||
{%- endif %}
|
|
||||||
bdev enable discard = {{ bdev_discard }}
|
bdev enable discard = {{ bdev_discard }}
|
||||||
bdev async discard = {{ bdev_discard }}
|
bdev async discard = {{ bdev_discard }}
|
||||||
|
|
||||||
|
@@ -25,17 +25,14 @@ class AddDiskActionTests(CharmTestCase):
             add_disk, ['hookenv', 'kv'])
         self.kv.return_value = self.kv
 
-    @mock.patch.object(add_disk.charms_ceph.utils, 'use_bluestore')
     @mock.patch.object(add_disk.ceph_hooks, 'get_journal_devices')
     @mock.patch.object(add_disk.charms_ceph.utils, 'osdize')
-    def test_add_device(self, mock_osdize, mock_get_journal_devices,
-                        mock_use_bluestore):
+    def test_add_device(self, mock_osdize, mock_get_journal_devices):
 
         def fake_config(key):
             return {
                 'ignore-device-errors': True,
                 'osd-encrypt': True,
-                'bluestore': True,
                 'osd-encrypt-keymanager': True,
                 'autotune': False,
             }.get(key)
@@ -43,7 +40,6 @@ class AddDiskActionTests(CharmTestCase):
         self.hookenv.config.side_effect = fake_config
         mock_get_journal_devices.return_value = ''
         self.hookenv.relation_ids.return_value = ['ceph:0']
-        mock_use_bluestore.return_value = True
 
         db = mock.MagicMock()
         self.kv.return_value = db
@@ -56,7 +52,7 @@ class AddDiskActionTests(CharmTestCase):
                                         relation_settings={'bootstrapped-osds': 1})
         self.hookenv.relation_set.assert_has_calls([call])
         mock_osdize.assert_has_calls([mock.call('/dev/myosddev',
-                                                None, '', True, True, True,
+                                                None, '', True, True,
                                                 True, None)])
 
         piter = add_disk.PartitionIter(['/dev/cache'], 100, ['/dev/myosddev'])
@@ -68,7 +68,6 @@ class CephHooksTestCase(unittest.TestCase):
     @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1")
     @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1")
     @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1)
-    @patch.object(ceph_utils, 'use_bluestore', lambda *args: False)
     @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
                                                               '10.0.0.2'])
     @patch.object(ceph_hooks, 'get_networks', lambda *args: "")
@@ -99,7 +98,6 @@ class CephHooksTestCase(unittest.TestCase):
                     'upgrade_in_progress': False,
                     'use_syslog': 'true',
                     'bdev_discard': True,
-                    'bluestore': False,
                     'bluestore_experimental': False,
                     'bluestore_block_wal_size': 0,
                     'bluestore_block_db_size': 0}
@@ -116,7 +114,6 @@ class CephHooksTestCase(unittest.TestCase):
     @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
                                                               '10.0.0.2'])
     @patch.object(ceph_hooks, 'get_networks', lambda *args: "")
-    @patch.object(ceph_utils, 'use_bluestore', lambda *args: False)
     @patch.object(ceph, 'config')
     @patch.object(ceph_hooks, 'config')
     def test_get_ceph_context_invalid_bdev_enable_discard(self, mock_config,
@@ -146,7 +143,6 @@ class CephHooksTestCase(unittest.TestCase):
                     'upgrade_in_progress': False,
                     'use_syslog': 'true',
                     'bdev_discard': False,
-                    'bluestore': False,
                     'bluestore_experimental': False,
                     'bluestore_block_wal_size': 0,
                     'bluestore_block_db_size': 0}
@@ -161,7 +157,6 @@ class CephHooksTestCase(unittest.TestCase):
     @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1")
     @patch.object(ceph_hooks, 'cmp_pkgrevno',
                   lambda pkg, ver: -1 if ver == '12.1.0' else 1)
-    @patch.object(ceph_utils, 'use_bluestore', lambda *args: False)
     @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
                                                               '10.0.0.2'])
     @patch.object(ceph_hooks, 'get_networks', lambda *args: "")
@@ -192,7 +187,6 @@ class CephHooksTestCase(unittest.TestCase):
                     'upgrade_in_progress': False,
                     'use_syslog': 'true',
                     'bdev_discard': True,
-                    'bluestore': False,
                     'bluestore_experimental': True,
                     'bluestore_block_wal_size': 0,
                     'bluestore_block_db_size': 0}
@@ -206,7 +200,6 @@ class CephHooksTestCase(unittest.TestCase):
     @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1")
     @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1")
     @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1)
-    @patch.object(ceph_utils, 'use_bluestore', lambda *args: True)
     @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
                                                               '10.0.0.2'])
     @patch.object(ceph_hooks, 'get_networks', lambda *args: "")
@@ -244,7 +237,6 @@ class CephHooksTestCase(unittest.TestCase):
                     'upgrade_in_progress': False,
                     'use_syslog': 'true',
                     'bdev_discard': True,
-                    'bluestore': True,
                     'bluestore_experimental': False,
                     'bluestore_block_wal_size': BLUESTORE_WAL_TEST_SIZE,
                     'bluestore_block_db_size': BLUESTORE_DB_TEST_SIZE}
@@ -259,7 +251,6 @@ class CephHooksTestCase(unittest.TestCase):
     @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1")
     @patch.object(ceph_hooks, 'cmp_pkgrevno',
                   lambda pkg, ver: -1 if ver == '12.1.0' else 1)
-    @patch.object(ceph_utils, 'use_bluestore', lambda *args: True)
     @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
                                                               '10.0.0.2'])
     @patch.object(ceph_hooks, 'get_networks', lambda *args: "")
@@ -268,7 +259,6 @@ class CephHooksTestCase(unittest.TestCase):
     def test_get_ceph_context_bluestore_old(self, mock_config, mock_config2):
         self.maxDiff = None
         config = copy.deepcopy(CHARM_CONFIG)
-        config['bluestore'] = True
         config['bluestore-block-wal-size'] = BLUESTORE_WAL_TEST_SIZE
         config['bluestore-block-db-size'] = BLUESTORE_DB_TEST_SIZE
         mock_config.side_effect = lambda key: config[key]
@@ -294,7 +284,6 @@ class CephHooksTestCase(unittest.TestCase):
                     'upgrade_in_progress': False,
                     'use_syslog': 'true',
                     'bdev_discard': True,
-                    'bluestore': True,
                     'bluestore_experimental': True,
                     'bluestore_block_wal_size': BLUESTORE_WAL_TEST_SIZE,
                     'bluestore_block_db_size': BLUESTORE_DB_TEST_SIZE}
@@ -308,7 +297,6 @@ class CephHooksTestCase(unittest.TestCase):
     @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1")
     @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1")
     @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1)
-    @patch.object(ceph_utils, 'use_bluestore', lambda *args: False)
     @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
                                                               '10.0.0.2'])
     @patch.object(ceph_hooks, 'get_networks', lambda *args: "")
@@ -341,7 +329,6 @@ class CephHooksTestCase(unittest.TestCase):
                     'upgrade_in_progress': False,
                     'use_syslog': 'true',
                     'bdev_discard': True,
-                    'bluestore': False,
                     'bluestore_experimental': False,
                     'bluestore_block_wal_size': 0,
                     'bluestore_block_db_size': 0}
@@ -355,7 +342,6 @@ class CephHooksTestCase(unittest.TestCase):
     @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1")
     @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1")
     @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1)
-    @patch.object(ceph_utils, 'use_bluestore', lambda *args: False)
     @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
                                                               '10.0.0.2'])
     @patch.object(ceph_hooks, 'get_networks', lambda *args: "")
@@ -390,7 +376,6 @@ class CephHooksTestCase(unittest.TestCase):
                     'upgrade_in_progress': False,
                     'use_syslog': 'true',
                     'bdev_discard': True,
-                    'bluestore': False,
                     'bluestore_experimental': False,
                     'bluestore_block_wal_size': 0,
                     'bluestore_block_db_size': 0}
@@ -440,7 +425,6 @@ class CephHooksTestCase(unittest.TestCase):
                     'upgrade_in_progress': False,
                     'use_syslog': 'true',
                     'bdev_discard': True,
-                    'bluestore': False,
                     'bluestore_experimental': False,
                     'bluestore_block_wal_size': 0,
                     'bluestore_block_db_size': 0,
@@ -15,7 +15,9 @@ class UpgradeRollingTestCase(CharmTestCase):
     @patch('ceph_hooks.emit_cephconf')
     @patch('ceph_hooks.hookenv')
     @patch('ceph_hooks.ceph.roll_osd_cluster')
-    def test_check_for_upgrade(self, roll_osd_cluster, hookenv,
+    @patch('utils.find_filestore_osds')
+    def test_check_for_upgrade(self, find_filestore_osds,
+                               roll_osd_cluster, hookenv,
                                emit_cephconf, version, exists,
                                dirs_need_ownership_update,
                                notify_mon_of_upgrade):
@@ -47,7 +49,9 @@ class UpgradeRollingTestCase(CharmTestCase):
     @patch('ceph_hooks.emit_cephconf')
     @patch('ceph_hooks.hookenv')
     @patch('ceph_hooks.ceph.roll_osd_cluster')
-    def test_resume_failed_upgrade(self, roll_osd_cluster,
+    @patch('utils.find_filestore_osds')
+    def test_resume_failed_upgrade(self, find_filestore_osds,
+                                   roll_osd_cluster,
                                    hookenv, emit_cephconf, version,
                                    exists,
                                    dirs_need_ownership_update,
@@ -94,7 +98,9 @@ class UpgradeRollingTestCase(CharmTestCase):
     @patch('ceph_hooks.ceph.is_bootstrapped')
     @patch('ceph_hooks.hookenv')
     @patch('ceph_hooks.ceph.roll_monitor_cluster')
-    def test_check_for_upgrade_from_pike_to_queens(self, roll_monitor_cluster,
+    @patch('utils.find_filestore_osds')
+    def test_check_for_upgrade_from_pike_to_queens(self, find_filestore_osds,
+                                                   roll_monitor_cluster,
                                                    hookenv, is_bootstrapped,
                                                    add_source,
                                                    dirs_need_ownership_update,
@@ -116,7 +122,9 @@ class UpgradeRollingTestCase(CharmTestCase):
     @patch('ceph_hooks.ceph.is_bootstrapped')
     @patch('ceph_hooks.hookenv')
     @patch('ceph_hooks.ceph.roll_monitor_cluster')
-    def test_check_for_upgrade_from_rocky_to_stein(self, roll_monitor_cluster,
+    @patch('utils.find_filestore_osds')
+    def test_check_for_upgrade_from_rocky_to_stein(self, find_filestore_osds,
+                                                   roll_monitor_cluster,
                                                    hookenv, is_bootstrapped,
                                                    add_source,
                                                    dirs_need_ownership_update,
@@ -132,6 +140,30 @@ class UpgradeRollingTestCase(CharmTestCase):
         roll_monitor_cluster.assert_not_called()
         add_source.assert_called_with('cloud:bionic-stein', 'some-key')
 
+    @patch('ceph_hooks.os.path.exists')
+    @patch('ceph_hooks.ceph.dirs_need_ownership_update')
+    @patch('ceph_hooks.add_source')
+    @patch('ceph_hooks.ceph.is_bootstrapped')
+    @patch('ceph_hooks.hookenv')
+    @patch('ceph_hooks.ceph.roll_monitor_cluster')
+    @patch('utils.find_filestore_osds')
+    def test_check_for_upgrade_reef_filestore(self, find_filestore_osds,
+                                              roll_monitor_cluster,
+                                              hookenv, is_bootstrapped,
+                                              add_source,
+                                              dirs_need_ownership_update,
+                                              exists):
+        exists.return_value = True
+        is_bootstrapped.return_value = True
+        find_filestore_osds.return_value = ['ceph-0']
+        hookenv.config.side_effect = self.test_config
+        self.test_config.set('key', 'some-key')
+        self.test_config.set_previous('source', 'cloud:jammy-antelope')
+        self.test_config.set('source', 'cloud:jammy-bobcat')
+        check_for_upgrade()
+        roll_monitor_cluster.assert_not_called()
+        dirs_need_ownership_update.assert_not_called()
+
 
 class UpgradeUtilTestCase(CharmTestCase):
     @patch('ceph_hooks.relation_ids')
|
Loading…
Reference in New Issue
Block a user