Remove FileStore support

Remove support for creating FileStore OSDs. Also prevent upgrade
attempts to Reef if a FileStore OSD is detected.

Change-Id: I9609bc0222365cb1f4059312b466a12ef4e0397f
Peter Sabaini, 2023-10-05 14:33:07 +02:00
parent ceb8187284, commit 1bac66ee50
9 changed files with 111 additions and 144 deletions
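
For context: every mounted Ceph OSD records its objectstore in a small
'type' marker file under its data directory, and both the new detection
helper and the upgrade guard below are built on that. A minimal standalone
sketch of the idea, with illustrative names that are not part of this
commit:

    import os


    def objectstore_by_osd(osd_root='/var/lib/ceph/osd'):
        """Map each local OSD directory to its objectstore type.

        Illustrative only: assumes the standard ceph-<id> directory
        layout and the 'type' marker file Ceph writes at OSD creation.
        """
        stores = {}
        for entry in os.listdir(osd_root):
            type_file = os.path.join(osd_root, entry, 'type')
            if entry.startswith('ceph-') and os.path.isfile(type_file):
                with open(type_file) as f:
                    stores[entry] = f.read().strip()
        return stores


    # Example result: {'ceph-0': 'filestore', 'ceph-1': 'bluestore'}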

View File

@@ -115,7 +115,6 @@ def add_device(request, device_path, bucket=None,
                          ceph_hooks.get_journal_devices(),
                          hookenv.config('ignore-device-errors'),
                          hookenv.config('osd-encrypt'),
-                         charms_ceph.utils.use_bluestore(),
                          hookenv.config('osd-encrypt-keymanager'),
                          osd_id)

View File

@@ -90,15 +90,6 @@ options:
       where the specified journal device does not exist on a node.
       .
       Only supported with ceph >= 0.48.3.
-  bluestore:
-    type: boolean
-    default: True
-    description: |
-      Enable BlueStore storage backend for OSD devices.
-      .
-      Only supported with ceph >= 12.2.0.
-      .
-      Setting to 'False' will use FileStore as the storage format.
   bluestore-wal:
     type: string
     default:

View File

@@ -26,6 +26,8 @@ import subprocess
 import sys
 import traceback
+import utils

 sys.path.append('lib')
 import charms_ceph.utils as ceph
 from charmhelpers.core import hookenv
@@ -147,6 +149,14 @@ def check_for_upgrade():
                                               'distro')
     new_version_os = get_os_codename_install_source(hookenv.config('source'))

+    # If the new version is reef, and we detect that we are running FileStore
+    # bail out with an error message
+    filestore_osds = utils.find_filestore_osds()
+    if new_version == 'reef' and filestore_osds:
+        log("Refuse to upgrade to reef with FileStore OSDs present: {}".format(
+            filestore_osds), level=ERROR)
+        return
+
     # May be in a previous upgrade that was failed if the directories
     # still need an ownership update. Check this condition.
     resuming_upgrade = ceph.dirs_need_ownership_update('osd')
@@ -464,7 +474,6 @@ def get_ceph_context(upgrading=False):
         'dio': str(config('use-direct-io')).lower(),
         'short_object_len': use_short_objects(),
         'upgrade_in_progress': upgrading,
-        'bluestore': ceph.use_bluestore(),
         'bluestore_experimental': cmp_pkgrevno('ceph', '12.1.0') < 0,
         'bluestore_block_wal_size': config('bluestore-block-wal-size'),
         'bluestore_block_db_size': config('bluestore-block-db-size'),
@@ -619,13 +628,11 @@ def prepare_disks_and_activate():
         log('ceph bootstrapped, rescanning disks')
         emit_cephconf()
         ceph.udevadm_settle()
-        bluestore = ceph.use_bluestore()
         for dev in get_devices():
             ceph.osdize(dev, config('osd-format'),
                         osd_journal,
                         config('ignore-device-errors'),
                         config('osd-encrypt'),
-                        bluestore,
                         config('osd-encrypt-keymanager'))
         # Make it fast!
         if config('autotune'):

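The reef guard above logs at ERROR and returns early, so the hook exits
cleanly without rolling the cluster. Distilled to its decision logic,
with illustrative names (the charm itself logs via hookenv rather than
print):

    def upgrade_allowed(new_version, filestore_osds):
        """Illustrative distillation of the guard added above: an
        upgrade to reef is refused while any FileStore OSD remains."""
        if new_version == 'reef' and filestore_osds:
            # Mirrors the charm's log(..., level=ERROR) and early return.
            print('Refuse to upgrade to reef with FileStore OSDs '
                  'present: {}'.format(filestore_osds))
            return False
        return True


    assert upgrade_allowed('reef', []) is True
    assert upgrade_allowed('reef', ['ceph-0']) is False
    assert upgrade_allowed('quincy', ['ceph-0']) is True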
View File

@@ -692,3 +692,26 @@ def get_parent_device(dev):
                 return '/dev/' + child['name']
     return dev

+
+def find_filestore_osds():
+    # Path to Ceph OSD
+    osd_path = '/var/lib/ceph/osd'
+    # Search through OSD directories in path starting with 'ceph-'
+    dirs = [d for d in os.listdir(osd_path)
+            if d.startswith('ceph-')
+            and os.path.isdir(os.path.join(osd_path, d))]
+
+    found = []
+    for dir in dirs:
+        # Construct the full path
+        type_file_path = os.path.join(osd_path, dir, 'type')
+        # Open and read the type file
+        with open(type_file_path, 'r') as f:
+            content = f.read()
+        # Check if the content includes 'filestore'
+        if 'filestore' in content:
+            found.append(dir)
+
+    return found
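
A caveat on the helper above: it assumes each ceph-* entry holds a
readable 'type' file, so an unmounted or partially removed OSD directory
would raise FileNotFoundError. A more defensive variant, an assumption
here rather than what the commit ships, would skip such entries:

    import os


    def find_filestore_osds_defensive(osd_path='/var/lib/ceph/osd'):
        # Hypothetical hardening of the helper above: tolerate OSD
        # directories whose 'type' marker is missing or unreadable.
        found = []
        for entry in os.listdir(osd_path):
            type_file = os.path.join(osd_path, entry, 'type')
            if not (entry.startswith('ceph-')
                    and os.path.isfile(type_file)):
                continue
            try:
                with open(type_file) as f:
                    if 'filestore' in f.read():
                        found.append(entry)
            except OSError:
                continue  # e.g. permission or transient mount issues
        return found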

View File

@@ -1324,16 +1324,6 @@ def systemd():
     return CompareHostReleases(lsb_release()['DISTRIB_CODENAME']) >= 'vivid'


-def use_bluestore():
-    """Determine whether bluestore should be used for OSD's
-
-    :returns: whether bluestore disk format should be used
-    :rtype: bool"""
-    if cmp_pkgrevno('ceph', '12.2.0') < 0:
-        return False
-    return config('bluestore')
-
-
 def bootstrap_monitor_cluster(secret):
     """Bootstrap local Ceph mon into the Ceph cluster
@@ -1551,21 +1541,21 @@ def get_devices(name):
 def osdize(dev, osd_format, osd_journal, ignore_errors=False, encrypt=False,
-           bluestore=False, key_manager=CEPH_KEY_MANAGER, osd_id=None):
+           key_manager=CEPH_KEY_MANAGER, osd_id=None):
     if dev.startswith('/dev'):
         osdize_dev(dev, osd_format, osd_journal,
                    ignore_errors, encrypt,
-                   bluestore, key_manager, osd_id)
+                   key_manager, osd_id)
     else:
         if cmp_pkgrevno('ceph', '14.0.0') >= 0:
             log("Directory backed OSDs can not be created on Nautilus",
                 level=WARNING)
             return
-        osdize_dir(dev, encrypt, bluestore)
+        osdize_dir(dev, encrypt)


 def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False,
-               encrypt=False, bluestore=False, key_manager=CEPH_KEY_MANAGER,
+               encrypt=False, key_manager=CEPH_KEY_MANAGER,
                osd_id=None):
     """
     Prepare a block device for use as a Ceph OSD
@@ -1579,7 +1569,6 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False,
     :param: ignore_errors: Don't fail in the event of any errors during
                            processing
     :param: encrypt: Encrypt block devices using 'key_manager'
-    :param: bluestore: Use bluestore native Ceph block device format
     :param: key_manager: Key management approach for encryption keys
     :raises subprocess.CalledProcessError: in the event that any supporting
                                            subprocess operation failed
@@ -1630,15 +1619,13 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False,
         cmd = _ceph_volume(dev,
                            osd_journal,
                            encrypt,
-                           bluestore,
                            key_manager,
                            osd_id)
     else:
         cmd = _ceph_disk(dev,
                          osd_format,
                          osd_journal,
-                         encrypt,
-                         bluestore)
+                         encrypt)

     try:
         status_set('maintenance', 'Initializing device {}'.format(dev))
@@ -1669,7 +1656,7 @@ def osdize_dev(dev, osd_format, osd_journal, ignore_errors=False,
             db.flush()


-def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False):
+def _ceph_disk(dev, osd_format, osd_journal, encrypt=False):
     """
     Prepare a device for usage as a Ceph OSD using ceph-disk

@@ -1677,7 +1664,6 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False):
                  The function looks up realpath of the device
     :param: osd_journal: List of block devices to use for OSD journals
     :param: encrypt: Use block device encryption (unsupported)
-    :param: bluestore: Use bluestore storage for OSD
     :returns: list. 'ceph-disk' command and required parameters for
               execution by check_call
     """
@@ -1686,12 +1672,6 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False):
     if encrypt:
         cmd.append('--dmcrypt')

-    if osd_format and not bluestore:
-        cmd.append('--fs-type')
-        cmd.append(osd_format)
-
-    # NOTE(jamespage): enable experimental bluestore support
-    if use_bluestore():
-        cmd.append('--bluestore')
-        wal = get_devices('bluestore-wal')
-        if wal:
+    cmd.append('--bluestore')
+    wal = get_devices('bluestore-wal')
+    if wal:
@@ -1703,8 +1683,6 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False):
         cmd.append('--block.db')
         least_used_db = find_least_used_utility_device(db)
         cmd.append(least_used_db)
-    elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore:
-        cmd.append('--filestore')

     cmd.append(os.path.realpath(dev))
@@ -1715,8 +1693,8 @@ def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False):
     return cmd


-def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False,
-                 key_manager=CEPH_KEY_MANAGER, osd_id=None):
+def _ceph_volume(dev, osd_journal, encrypt=False, key_manager=CEPH_KEY_MANAGER,
+                 osd_id=None):
     """
     Prepare and activate a device for usage as a Ceph OSD using ceph-volume.
@@ -1726,7 +1704,6 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False,
     :param: dev: Full path to use for OSD block device setup
     :param: osd_journal: List of block devices to use for OSD journals
     :param: encrypt: Use block device encryption
-    :param: bluestore: Use bluestore storage for OSD
     :param: key_manager: dm-crypt Key Manager to use
     :param: osd_id: The OSD-id to recycle, or None to create a new one
     :raises subprocess.CalledProcessError: in the event that any supporting
@@ -1739,13 +1716,8 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False,
     osd_fsid = str(uuid.uuid4())
     cmd.append('--osd-fsid')
     cmd.append(osd_fsid)

-    if bluestore:
-        cmd.append('--bluestore')
-        main_device_type = 'block'
-    else:
-        cmd.append('--filestore')
-        main_device_type = 'data'
+    cmd.append('--bluestore')
+    main_device_type = 'block'

     if encrypt and key_manager == CEPH_KEY_MANAGER:
         cmd.append('--dmcrypt')
@@ -1753,19 +1725,6 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False,
     if osd_id is not None:
         cmd.extend(['--osd-id', str(osd_id)])

-    # On-disk journal volume creation
-    if not osd_journal and not bluestore:
-        journal_lv_type = 'journal'
-        cmd.append('--journal')
-        cmd.append(_allocate_logical_volume(
-            dev=dev,
-            lv_type=journal_lv_type,
-            osd_fsid=osd_fsid,
-            size='{}M'.format(calculate_volume_size('journal')),
-            encrypt=encrypt,
-            key_manager=key_manager)
-        )

     cmd.append('--data')
     cmd.append(_allocate_logical_volume(dev=dev,
                                         lv_type=main_device_type,
@@ -1773,7 +1732,6 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False,
                                         encrypt=encrypt,
                                         key_manager=key_manager))

-    if bluestore:
     for extra_volume in ('wal', 'db'):
         devices = get_devices('bluestore-{}'.format(extra_volume))
         if devices:
@@ -1790,20 +1748,6 @@ def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False,
                     key_manager=key_manager)
                 )

-    elif osd_journal:
-        cmd.append('--journal')
-        least_used = find_least_used_utility_device(osd_journal,
-                                                    lvs=True)
-        cmd.append(_allocate_logical_volume(
-            dev=least_used,
-            lv_type='journal',
-            osd_fsid=osd_fsid,
-            size='{}M'.format(calculate_volume_size('journal')),
-            shared=True,
-            encrypt=encrypt,
-            key_manager=key_manager)
-        )

     return cmd
@@ -2040,7 +1984,7 @@ def _allocate_logical_volume(dev, lv_type, osd_fsid,
     return "{}/{}".format(vg_name, lv_name)


-def osdize_dir(path, encrypt=False, bluestore=False):
+def osdize_dir(path, encrypt=False):
     """Ask ceph-disk to prepare a directory to become an OSD.

     :param path: str. The directory to osdize
@@ -2077,12 +2021,8 @@ def osdize_dir(path, encrypt=False, bluestore=False):
     if cmp_pkgrevno('ceph', '0.60') >= 0:
         if encrypt:
             cmd.append('--dmcrypt')
-        # NOTE(icey): enable experimental bluestore support
-        if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore:
-            cmd.append('--bluestore')
-        elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore:
-            cmd.append('--filestore')
+        cmd.append('--bluestore')

     log("osdize dir cmd: {}".format(cmd))
     subprocess.check_call(cmd)

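Net effect on the charms_ceph API: osdize(), osdize_dev(), _ceph_disk(),
_ceph_volume() and osdize_dir() each lose their bluestore parameter, so
any positional caller must drop one argument. A hypothetical call site
under the new osdize() signature (values illustrative, and note that a
real call prepares the device):

    import charms_ceph.utils as ceph

    # osdize(dev, osd_format, osd_journal, ignore_errors=False,
    #        encrypt=False, key_manager=CEPH_KEY_MANAGER, osd_id=None)
    ceph.osdize('/dev/vdb',          # illustrative device path
                None,                # osd_format: only mattered for FileStore
                [],                  # osd_journal devices (FileStore-era)
                ignore_errors=False,
                encrypt=True,
                key_manager='ceph',  # the CEPH_KEY_MANAGER default
                osd_id=None)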
View File

@@ -44,7 +44,7 @@ osd crush initial weight = {{ crush_initial_weight }}
 {% endfor %}
 {% endif %}

-{% if bluestore_experimental and bluestore -%}
+{% if bluestore_experimental -%}
 enable experimental unrecoverable data corrupting features = bluestore rocksdb
 {%- endif %}

@@ -66,7 +66,6 @@ keyring = /var/lib/ceph/mds/$cluster-$id/keyring
 [osd]
 keyring = /var/lib/ceph/osd/$cluster-$id/keyring

-{% if bluestore -%}
 {% if not bluestore_experimental -%}
 osd objectstore = bluestore
 {%- endif %}
@@ -77,11 +76,7 @@ bluestore block wal size = {{ bluestore_block_wal_size }}
 bluestore block db size = {{ bluestore_block_db_size }}
 {%- endif %}
 {% include 'section-ceph-bluestore-compression' %}
-{%- else %}
-osd journal size = {{ osd_journal_size }}
-filestore xattr use omap = true
-journal dio = {{ dio }}
-{%- endif %}

 bdev enable discard = {{ bdev_discard }}
 bdev async discard = {{ bdev_discard }}

View File

@@ -25,17 +25,14 @@ class AddDiskActionTests(CharmTestCase):
             add_disk, ['hookenv', 'kv'])
         self.kv.return_value = self.kv

-    @mock.patch.object(add_disk.charms_ceph.utils, 'use_bluestore')
     @mock.patch.object(add_disk.ceph_hooks, 'get_journal_devices')
     @mock.patch.object(add_disk.charms_ceph.utils, 'osdize')
-    def test_add_device(self, mock_osdize, mock_get_journal_devices,
-                        mock_use_bluestore):
+    def test_add_device(self, mock_osdize, mock_get_journal_devices):

         def fake_config(key):
             return {
                 'ignore-device-errors': True,
                 'osd-encrypt': True,
-                'bluestore': True,
                 'osd-encrypt-keymanager': True,
                 'autotune': False,
             }.get(key)
@@ -43,7 +40,6 @@ class AddDiskActionTests(CharmTestCase):
         self.hookenv.config.side_effect = fake_config
         mock_get_journal_devices.return_value = ''
         self.hookenv.relation_ids.return_value = ['ceph:0']
-        mock_use_bluestore.return_value = True

         db = mock.MagicMock()
         self.kv.return_value = db
@@ -56,7 +52,7 @@ class AddDiskActionTests(CharmTestCase):
             relation_settings={'bootstrapped-osds': 1})
         self.hookenv.relation_set.assert_has_calls([call])
         mock_osdize.assert_has_calls([mock.call('/dev/myosddev',
-                                                None, '', True, True, True,
+                                                None, '', True, True,
                                                 True, None)])

         piter = add_disk.PartitionIter(['/dev/cache'], 100, ['/dev/myosddev'])

View File

@@ -68,7 +68,6 @@ class CephHooksTestCase(unittest.TestCase):
     @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1")
     @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1")
     @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1)
-    @patch.object(ceph_utils, 'use_bluestore', lambda *args: False)
     @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
                                                               '10.0.0.2'])
     @patch.object(ceph_hooks, 'get_networks', lambda *args: "")
@@ -99,7 +98,6 @@ class CephHooksTestCase(unittest.TestCase):
                     'upgrade_in_progress': False,
                     'use_syslog': 'true',
                     'bdev_discard': True,
-                    'bluestore': False,
                     'bluestore_experimental': False,
                     'bluestore_block_wal_size': 0,
                     'bluestore_block_db_size': 0}
@@ -116,7 +114,6 @@ class CephHooksTestCase(unittest.TestCase):
     @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
                                                               '10.0.0.2'])
     @patch.object(ceph_hooks, 'get_networks', lambda *args: "")
-    @patch.object(ceph_utils, 'use_bluestore', lambda *args: False)
     @patch.object(ceph, 'config')
     @patch.object(ceph_hooks, 'config')
     def test_get_ceph_context_invalid_bdev_enable_discard(self, mock_config,
@@ -146,7 +143,6 @@ class CephHooksTestCase(unittest.TestCase):
                     'upgrade_in_progress': False,
                     'use_syslog': 'true',
                     'bdev_discard': False,
-                    'bluestore': False,
                     'bluestore_experimental': False,
                     'bluestore_block_wal_size': 0,
                     'bluestore_block_db_size': 0}
@@ -161,7 +157,6 @@ class CephHooksTestCase(unittest.TestCase):
     @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1")
     @patch.object(ceph_hooks, 'cmp_pkgrevno',
                   lambda pkg, ver: -1 if ver == '12.1.0' else 1)
-    @patch.object(ceph_utils, 'use_bluestore', lambda *args: False)
     @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
                                                               '10.0.0.2'])
     @patch.object(ceph_hooks, 'get_networks', lambda *args: "")
@@ -192,7 +187,6 @@ class CephHooksTestCase(unittest.TestCase):
                     'upgrade_in_progress': False,
                     'use_syslog': 'true',
                     'bdev_discard': True,
-                    'bluestore': False,
                     'bluestore_experimental': True,
                     'bluestore_block_wal_size': 0,
                     'bluestore_block_db_size': 0}
@@ -206,7 +200,6 @@ class CephHooksTestCase(unittest.TestCase):
     @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1")
     @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1")
     @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1)
-    @patch.object(ceph_utils, 'use_bluestore', lambda *args: True)
     @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
                                                               '10.0.0.2'])
     @patch.object(ceph_hooks, 'get_networks', lambda *args: "")
@@ -244,7 +237,6 @@ class CephHooksTestCase(unittest.TestCase):
                     'upgrade_in_progress': False,
                     'use_syslog': 'true',
                     'bdev_discard': True,
-                    'bluestore': True,
                     'bluestore_experimental': False,
                     'bluestore_block_wal_size': BLUESTORE_WAL_TEST_SIZE,
                     'bluestore_block_db_size': BLUESTORE_DB_TEST_SIZE}
@@ -259,7 +251,6 @@ class CephHooksTestCase(unittest.TestCase):
     @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1")
     @patch.object(ceph_hooks, 'cmp_pkgrevno',
                   lambda pkg, ver: -1 if ver == '12.1.0' else 1)
-    @patch.object(ceph_utils, 'use_bluestore', lambda *args: True)
     @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
                                                               '10.0.0.2'])
     @patch.object(ceph_hooks, 'get_networks', lambda *args: "")
@@ -268,7 +259,6 @@ class CephHooksTestCase(unittest.TestCase):
     def test_get_ceph_context_bluestore_old(self, mock_config, mock_config2):
         self.maxDiff = None
         config = copy.deepcopy(CHARM_CONFIG)
-        config['bluestore'] = True
         config['bluestore-block-wal-size'] = BLUESTORE_WAL_TEST_SIZE
         config['bluestore-block-db-size'] = BLUESTORE_DB_TEST_SIZE
         mock_config.side_effect = lambda key: config[key]
@@ -294,7 +284,6 @@ class CephHooksTestCase(unittest.TestCase):
                     'upgrade_in_progress': False,
                     'use_syslog': 'true',
                     'bdev_discard': True,
-                    'bluestore': True,
                     'bluestore_experimental': True,
                     'bluestore_block_wal_size': BLUESTORE_WAL_TEST_SIZE,
                     'bluestore_block_db_size': BLUESTORE_DB_TEST_SIZE}
@@ -308,7 +297,6 @@ class CephHooksTestCase(unittest.TestCase):
     @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1")
     @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1")
     @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1)
-    @patch.object(ceph_utils, 'use_bluestore', lambda *args: False)
     @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
                                                               '10.0.0.2'])
     @patch.object(ceph_hooks, 'get_networks', lambda *args: "")
@@ -341,7 +329,6 @@ class CephHooksTestCase(unittest.TestCase):
                     'upgrade_in_progress': False,
                     'use_syslog': 'true',
                     'bdev_discard': True,
-                    'bluestore': False,
                     'bluestore_experimental': False,
                     'bluestore_block_wal_size': 0,
                     'bluestore_block_db_size': 0}
@@ -355,7 +342,6 @@ class CephHooksTestCase(unittest.TestCase):
     @patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1")
     @patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1")
     @patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1)
-    @patch.object(ceph_utils, 'use_bluestore', lambda *args: False)
     @patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
                                                               '10.0.0.2'])
     @patch.object(ceph_hooks, 'get_networks', lambda *args: "")
@@ -390,7 +376,6 @@ class CephHooksTestCase(unittest.TestCase):
                     'upgrade_in_progress': False,
                     'use_syslog': 'true',
                     'bdev_discard': True,
-                    'bluestore': False,
                     'bluestore_experimental': False,
                     'bluestore_block_wal_size': 0,
                     'bluestore_block_db_size': 0}
@@ -440,7 +425,6 @@ class CephHooksTestCase(unittest.TestCase):
                     'upgrade_in_progress': False,
                     'use_syslog': 'true',
                     'bdev_discard': True,
-                    'bluestore': False,
                     'bluestore_experimental': False,
                     'bluestore_block_wal_size': 0,
                     'bluestore_block_db_size': 0,

View File

@@ -15,7 +15,9 @@ class UpgradeRollingTestCase(CharmTestCase):
     @patch('ceph_hooks.emit_cephconf')
     @patch('ceph_hooks.hookenv')
     @patch('ceph_hooks.ceph.roll_osd_cluster')
-    def test_check_for_upgrade(self, roll_osd_cluster, hookenv,
+    @patch('utils.find_filestore_osds')
+    def test_check_for_upgrade(self, find_filestore_osds,
+                               roll_osd_cluster, hookenv,
                                emit_cephconf, version, exists,
                                dirs_need_ownership_update,
                                notify_mon_of_upgrade):
@@ -47,7 +49,9 @@ class UpgradeRollingTestCase(CharmTestCase):
     @patch('ceph_hooks.emit_cephconf')
     @patch('ceph_hooks.hookenv')
     @patch('ceph_hooks.ceph.roll_osd_cluster')
-    def test_resume_failed_upgrade(self, roll_osd_cluster,
+    @patch('utils.find_filestore_osds')
+    def test_resume_failed_upgrade(self, find_filestore_osds,
+                                   roll_osd_cluster,
                                    hookenv, emit_cephconf, version,
                                    exists,
                                    dirs_need_ownership_update,
@@ -94,7 +98,9 @@ class UpgradeRollingTestCase(CharmTestCase):
     @patch('ceph_hooks.ceph.is_bootstrapped')
     @patch('ceph_hooks.hookenv')
     @patch('ceph_hooks.ceph.roll_monitor_cluster')
-    def test_check_for_upgrade_from_pike_to_queens(self, roll_monitor_cluster,
+    @patch('utils.find_filestore_osds')
+    def test_check_for_upgrade_from_pike_to_queens(self, find_filestore_osds,
+                                                   roll_monitor_cluster,
                                                    hookenv, is_bootstrapped,
                                                    add_source,
                                                    dirs_need_ownership_update,
@@ -116,7 +122,9 @@ class UpgradeRollingTestCase(CharmTestCase):
     @patch('ceph_hooks.ceph.is_bootstrapped')
     @patch('ceph_hooks.hookenv')
     @patch('ceph_hooks.ceph.roll_monitor_cluster')
-    def test_check_for_upgrade_from_rocky_to_stein(self, roll_monitor_cluster,
+    @patch('utils.find_filestore_osds')
+    def test_check_for_upgrade_from_rocky_to_stein(self, find_filestore_osds,
+                                                   roll_monitor_cluster,
                                                    hookenv, is_bootstrapped,
                                                    add_source,
                                                    dirs_need_ownership_update,
@@ -132,6 +140,30 @@ class UpgradeRollingTestCase(CharmTestCase):
         roll_monitor_cluster.assert_not_called()
         add_source.assert_called_with('cloud:bionic-stein', 'some-key')

+    @patch('ceph_hooks.os.path.exists')
+    @patch('ceph_hooks.ceph.dirs_need_ownership_update')
+    @patch('ceph_hooks.add_source')
+    @patch('ceph_hooks.ceph.is_bootstrapped')
+    @patch('ceph_hooks.hookenv')
+    @patch('ceph_hooks.ceph.roll_monitor_cluster')
+    @patch('utils.find_filestore_osds')
+    def test_check_for_upgrade_reef_filestore(self, find_filestore_osds,
+                                              roll_monitor_cluster,
+                                              hookenv, is_bootstrapped,
+                                              add_source,
+                                              dirs_need_ownership_update,
+                                              exists):
+        exists.return_value = True
+        is_bootstrapped.return_value = True
+        find_filestore_osds.return_value = ['ceph-0']
+        hookenv.config.side_effect = self.test_config
+        self.test_config.set('key', 'some-key')
+        self.test_config.set_previous('source', 'cloud:jammy-antelope')
+        self.test_config.set('source', 'cloud:jammy-bobcat')
+        check_for_upgrade()
+        roll_monitor_cluster.assert_not_called()
+        dirs_need_ownership_update.assert_not_called()


 class UpgradeUtilTestCase(CharmTestCase):
     @patch('ceph_hooks.relation_ids')