Fix Ceph upgrade issue by porting charm to common framework

Move helpers as-is from reactive handler module to charm class.

Set default of ``source`` configuration option to 'distro' to be
in line with other reactive charms and the expectations of the
``openstack.utils.get_source_and_pgp_key`` function.

Unpin flake8.

Func-Test-Pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/313
Depends-On: I6fbde855ba8f83ef5e265bd5b5dfb0d01eae830b
Change-Id: I2d8e8b244db935673406d2cce9ef34e7252cb50b
Closes-Bug: #1879072
This commit is contained in:
Frode Nordahl 2020-06-09 15:42:21 +02:00
parent bf0e4724e7
commit ff7ff85779
No known key found for this signature in database
GPG Key ID: 6A5D59A3BA48373F
14 changed files with 443 additions and 322 deletions

View File

@ -5,7 +5,7 @@ options:
description: Mon and OSD debug level. Max is 20.
source:
type: string
default:
default: distro
description: |
Optional configuration to support use of additional sources such as:
- ppa:myteam/ppa

View File

@ -1,5 +1,14 @@
includes: ['layer:basic', 'layer:apt', 'interface:ceph-mds']
includes: ['layer:ceph', 'interface:ceph-mds']
options:
status:
patch-hookenv: False
basic:
use_venv: True
include_system_packages: False
repo: https://git.openstack.org/openstack/charm-ceph-fs
config:
deletes:
- debug
- ssl_ca
- ssl_cert
- ssl_key
- use-internal-endpoints
- verbose

13
src/lib/__init__.py Normal file
View File

@ -0,0 +1,13 @@
# Copyright 2020 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

13
src/lib/charm/__init__.py Normal file
View File

@ -0,0 +1,13 @@
# Copyright 2020 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,13 @@
# Copyright 2020 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

View File

@ -0,0 +1,172 @@
# Copyright 2020 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import dns.resolver
import charms_openstack.adapters
import charms_openstack.charm
import charms_openstack.plugins
import charmhelpers.core as ch_core
# NOTE(fnordahl) theese out of style imports are here to help keeping helpers
# moved from reactive module as-is to make the diff managable. At some point
# in time we should replace them in favor of common helpers that would do the
# same job.
from charmhelpers.core.hookenv import (
config, log, cached, DEBUG, unit_get,
network_get_primary_address,
status_set)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
get_ipv6_addr)
charms_openstack.charm.use_defaults('charm.default-select-release')
class CephFSCharmConfigurationAdapter(
        charms_openstack.adapters.ConfigurationAdapter):
    """Augment the configuration adapter with charm-derived values.

    These properties are referenced from the ``ceph.conf`` template via
    ``options.<name>``.
    """

    @property
    def hostname(self):
        """Short hostname of this unit, as provided by the charm class."""
        return self.charm_instance.hostname

    @property
    def mds_name(self):
        """MDS instance name; by convention the unit's hostname."""
        return self.charm_instance.hostname

    @property
    def networks(self):
        """Configured ``ceph-public-network`` entries with a local address."""
        return self.charm_instance.get_networks('ceph-public-network')

    @property
    def public_addr(self):
        """Public address, honouring the ``prefer-ipv6`` option."""
        if ch_core.hookenv.config('prefer-ipv6'):
            return get_ipv6_addr()[0]
        return self.charm_instance.get_public_addr()
class CephFSCharmRelationAdapters(
        charms_openstack.adapters.OpenStackRelationAdapters):
    # Map the ceph-mds endpoint onto the common Ceph relation adapter so
    # templates can use e.g. ``ceph_mds.auth`` and ``ceph_mds.monitors``.
    relation_adapters = {
        'ceph-mds': charms_openstack.plugins.CephRelationAdapter,
    }
class BaseCephFSCharm(charms_openstack.plugins.CephCharm):
    """Common implementation for the ceph-fs charm.

    Release-specific subclasses only override ``packages``; everything
    else (adapters, service map, network helpers) lives here.
    """
    abstract_class = True

    name = 'ceph-fs'
    python_version = 3
    required_relations = ['ceph-mds']
    user = 'ceph'
    group = 'ceph'
    adapters_class = CephFSCharmRelationAdapters
    configuration_class = CephFSCharmConfigurationAdapter
    ceph_service_type = charms_openstack.plugins.CephCharm.CephServiceType.mds
    ceph_service_name_override = 'mds'
    ceph_key_per_unit_name = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # One MDS systemd service instance per unit, keyed on hostname.
        self.services = [
            'ceph-mds@{}'.format(self.hostname),
        ]
        self.restart_map = {
            '/etc/ceph/ceph.conf': self.services,
        }

    # NOTE(fnordahl) moved from reactive handler module, otherwise keeping
    # these as-is to make the diff manageable. At some point in time we
    # should replace them in favor of common helpers that would do the
    # same job.
    @staticmethod
    def get_networks(config_opt='ceph-public-network'):
        """Get all configured networks from provided config option.

        If public network(s) are provided, go through them and return those
        for which we have an address configured.
        """
        networks = config(config_opt)
        if networks:
            networks = networks.split()
            return [n for n in networks if get_address_in_network(n)]
        return []

    @cached
    def get_public_addr(self):
        """Return the address to publish on the Ceph public network.

        Preference order: first address on a configured
        ``ceph-public-network``, then the Juju 'public' binding, finally
        the unit's host IP.
        """
        if config('ceph-public-network'):
            return self.get_network_addrs('ceph-public-network')[0]
        try:
            return network_get_primary_address('public')
        except NotImplementedError:
            log("network-get not supported", DEBUG)
        return self.get_host_ip()

    # FIX: decorator order. Previously ``@cached`` was applied outside
    # ``@staticmethod``, so the class attribute was a wrapper around the
    # staticmethod descriptor itself, which is not callable on the Python
    # versions this charm targets (< 3.10) and would raise TypeError when
    # invoked via ``self.get_host_ip()``.
    @staticmethod
    @cached
    def get_host_ip(hostname=None):
        """Resolve this unit's IP, preferring IPv6 when configured."""
        if config('prefer-ipv6'):
            return get_ipv6_addr()[0]
        hostname = hostname or unit_get('private-address')
        try:
            # Test to see if already an IPv4 address
            socket.inet_aton(hostname)
            return hostname
        except socket.error:
            # This may throw an NXDOMAIN exception; in which case
            # things are badly broken so just let it kill the hook
            answers = dns.resolver.query(hostname, 'A')
            if answers:
                return answers[0].address

    def get_network_addrs(self, config_opt):
        """Get all configured public networks addresses.

        If public network(s) are provided, go through them and return the
        addresses we have configured on any of those networks.

        :raises Exception: when networks are configured but none of them
            has a local address (unit is also set to 'blocked').
        """
        addrs = []
        networks = config(config_opt)
        if networks:
            networks = networks.split()
            addrs = [get_address_in_network(n) for n in networks]
            addrs = [a for a in addrs if a]
        if not addrs:
            if networks:
                msg = ("Could not find an address on any of '%s' - resolve "
                       "this error to retry" % networks)
                status_set('blocked', msg)
                raise Exception(msg)
            else:
                return [self.get_host_ip()]
        return addrs
class MitakaCephFSCharm(BaseCephFSCharm):
    """ceph-fs charm for releases selected as 'mitaka'.

    Uses the pre-focal 'btrfs-tools' package name.
    """
    release = 'mitaka'
    packages = ['ceph-mds', 'gdisk', 'ntp', 'btrfs-tools', 'xfsprogs']
class UssuriCephFSCharm(BaseCephFSCharm):
    """ceph-fs charm for releases selected as 'ussuri'.

    Differs from the mitaka class only in the renamed 'btrfs-progs'
    package.
    """
    release = 'ussuri'
    packages = ['ceph-mds', 'gdisk', 'ntp', 'btrfs-progs', 'xfsprogs']

View File

@ -12,283 +12,36 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import socket
import subprocess
import dns.resolver
from charms import reactive
from charms.reactive import when, when_not, hook
from charms.reactive.flags import set_flag, clear_flag, is_flag_set
from charmhelpers.core import hookenv
from charmhelpers.core import unitdata
from charmhelpers.core.hookenv import (
application_version_set, config, log, ERROR, cached, DEBUG, unit_get,
network_get_primary_address, relation_ids,
status_set)
from charmhelpers.core.host import (
CompareHostReleases,
lsb_release,
service_restart,
service)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
get_ipv6_addr)
from charmhelpers.fetch import (
get_upstream_version,
import charms_openstack.bus
import charms_openstack.charm as charm
charms_openstack.bus.discover()
charm.use_defaults(
'charm.installed',
'config.changed',
'config.rendered',
'upgrade-charm',
'update-status',
)
import jinja2
from charms.apt import queue_install, add_source
PACKAGES = ['ceph', 'gdisk', 'ntp', 'btrfs-tools', 'xfsprogs']
PACKAGES_FOCAL = ['ceph', 'gdisk', 'ntp', 'btrfs-progs', 'xfsprogs']
TEMPLATES_DIR = 'templates'
VERSION_PACKAGE = 'ceph-common'
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
    """Render the named Jinja2 template from *template_dir* with *context*."""
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir))
    return env.get_template(template_name).render(context)
@when_not('apt.installed.ceph')
def install_ceph_base():
    """Queue installation of the base Ceph packages.

    Configures the apt source from the ``source``/``key`` options first,
    then selects the package set for the running Ubuntu series.
    """
    add_source(config('source'), key=config('key'))
    codename = lsb_release()['DISTRIB_CODENAME'].lower()
    # focal renamed btrfs-tools to btrfs-progs, hence the split list.
    packages = (PACKAGES_FOCAL
                if CompareHostReleases(codename) >= 'focal'
                else PACKAGES)
    queue_install(packages)
@when_not('apt.installed.ceph-mds')
def install_cephfs():
    """Queue installation of the MDS package itself."""
    queue_install(['ceph-mds'])
@when('cephfs.configured')
@when('ceph-mds.pools.available')
@when_not('cephfs.started')
def setup_mds(relation):
    """Start and enable the per-unit MDS service once configured.

    Sets the ``cephfs.started`` flag on success so this handler does not
    re-run; clears it on failure so the restart is retried on the next
    hook invocation.
    """
    service_name = 'ceph-mds@{}'.format(socket.gethostname())
    if service_restart(service_name):
        set_flag('cephfs.started')
        # Enable so the service also comes up after a reboot.
        service('enable', service_name)
        application_version_set(get_upstream_version(VERSION_PACKAGE))
    else:
        # FIX: previous message misspelt the service name ('cpeh-mds').
        log(message='Error: restarting ceph-mds', level=ERROR)
        clear_flag('cephfs.started')
@when('ceph-mds.available')
def config_changed(ceph_client):
    """Render ceph.conf and the MDS keyring from broker-provided data.

    Sets ``cephfs.configured`` on success; clears it when either file
    cannot be written so the handler re-runs on the next hook.

    :param ceph_client: ceph-mds endpoint providing fsid, auth, monitor
        hosts and the MDS cephx key.
    """
    charm_ceph_conf = os.path.join(os.sep,
                                   'etc',
                                   'ceph',
                                   'ceph.conf')
    # Per-unit MDS data directory: /var/lib/ceph/mds/ceph-<hostname>
    key_path = os.path.join(os.sep,
                            'var',
                            'lib',
                            'ceph',
                            'mds',
                            'ceph-{}'.format(socket.gethostname())
                            )
    if not os.path.exists(key_path):
        os.makedirs(key_path)
    cephx_key = os.path.join(key_path,
                             'keyring')

    ceph_context = {
        'fsid': ceph_client.fsid(),
        'auth_supported': ceph_client.auth(),
        'use_syslog': str(config('use-syslog')).lower(),
        'mon_hosts': ' '.join(ceph_client.mon_hosts()),
        'loglevel': config('loglevel'),
        'hostname': socket.gethostname(),
        'mds_name': socket.gethostname(),
    }

    # Prefer an explicit public network; otherwise publish a single
    # address (IPv6 when 'prefer-ipv6' is set).
    networks = get_networks('ceph-public-network')
    if networks:
        ceph_context['ceph_public_network'] = ', '.join(networks)
    elif config('prefer-ipv6'):
        dynamic_ipv6_address = get_ipv6_addr()[0]
        ceph_context['public_addr'] = dynamic_ipv6_address
    else:
        ceph_context['public_addr'] = get_public_addr()

    try:
        with open(charm_ceph_conf, 'w') as ceph_conf:
            ceph_conf.write(render_template('ceph.conf', ceph_context))
    except IOError as err:
        log("IOError writing ceph.conf: {}".format(err))
        clear_flag('cephfs.configured')
        return

    try:
        with open(cephx_key, 'w') as key_file:
            key_file.write("[mds.{}]\n\tkey = {}\n".format(
                socket.gethostname(),
                ceph_client.mds_key()
            ))
    except IOError as err:
        log("IOError writing mds-a.keyring: {}".format(err))
        clear_flag('cephfs.configured')
        return

    set_flag('cephfs.configured')
def get_networks(config_opt='ceph-public-network'):
    """Get all configured networks from provided config option.

    If public network(s) are provided, go through them and return those for
    which we have an address configured.
    """
    configured = config(config_opt)
    if not configured:
        return []
    return [network for network in configured.split()
            if get_address_in_network(network)]
@cached
def get_public_addr():
    """Return the address to publish on the Ceph public network.

    Preference order: first address on a configured
    ``ceph-public-network``, then Juju network-get for the 'public'
    binding, finally the unit's host IP.
    """
    if config('ceph-public-network'):
        return get_network_addrs('ceph-public-network')[0]

    try:
        return network_get_primary_address('public')
    except NotImplementedError:
        log("network-get not supported", DEBUG)

    return get_host_ip()
@cached
def get_host_ip(hostname=None):
    """Resolve the unit's host IP, preferring IPv6 when configured.

    Falls back to the unit's private-address when *hostname* is not
    given; returns it directly when it is already an IPv4 literal,
    otherwise resolves it via DNS.
    """
    if config('prefer-ipv6'):
        return get_ipv6_addr()[0]

    target = hostname or unit_get('private-address')
    try:
        socket.inet_aton(target)
    except socket.error:
        # Not an IPv4 literal; resolve via DNS. An NXDOMAIN here means
        # things are badly broken, so just let it kill the hook.
        answers = dns.resolver.query(target, 'A')
        if answers:
            return answers[0].address
    else:
        return target
def get_network_addrs(config_opt):
    """Get all configured public networks addresses.

    If public network(s) are provided, go through them and return the
    addresses we have configured on any of those networks.
    """
    networks = config(config_opt)
    networks = networks.split() if networks else networks

    addrs = []
    if networks:
        addrs = [a for a in (get_address_in_network(n) for n in networks)
                 if a]
    if addrs:
        return addrs

    if not networks:
        # Nothing configured: publish the host's own address.
        return [get_host_ip()]

    # Networks were configured but none matched a local address; block the
    # unit so the operator can correct the configuration.
    msg = ("Could not find an address on any of '%s' - resolve this "
           "error to retry" % networks)
    status_set('blocked', msg)
    raise Exception(msg)
def assess_status():
    """Assess status of current unit"""
    statuses = set([])
    messages = set([])
    # Handle Series Upgrade
    # NOTE(review): the kv key says 'vault' — it appears copy-pasted from
    # the vault charm. The series-upgrade hooks in this module set/clear
    # the same key, so behaviour is consistent, but the name is
    # misleading; confirm before renaming.
    if unitdata.kv().get('charm.vault.series-upgrading'):
        status_set("blocked",
                   "Ready for do-release-upgrade and reboot. "
                   "Set complete when finished.")
        return
    if is_flag_set('cephfs.started'):
        (status, message) = log_mds()
        statuses.add(status)
        messages.add(message)
    # Severity: 'blocked' wins over 'waiting', which wins over 'active'.
    if 'blocked' in statuses:
        status = 'blocked'
    elif 'waiting' in statuses:
        status = 'waiting'
    else:
        status = 'active'
    message = '; '.join(messages)
    status_set(status, message)
def get_running_mds():
    """Returns a list of the pids of the current running MDS daemons"""
    try:
        output = subprocess.check_output(['pgrep', 'ceph-mds'])
    except subprocess.CalledProcessError:
        # pgrep exits non-zero when no process matches.
        return []
    return output.decode('utf-8').split()
def log_mds():
    """Return a ``(status, message)`` tuple describing local MDS health."""
    if not relation_ids('ceph-mds'):
        return 'blocked', 'Missing relation: monitor'
    running_mds = get_running_mds()
    if running_mds:
        return 'active', 'Unit is ready ({} MDS)'.format(len(running_mds))
    return 'blocked', 'No MDS detected using current configuration'
# Per https://github.com/juju-solutions/charms.reactive/issues/33,
# this module may be imported multiple times so ensure the
# initialization hook is only registered once. I have to piggy back
# onto the namespace of a module imported before reactive discovery
# to do this.
if not hasattr(reactive, '_ceph_log_registered'):
    # We need to register this to run every hook, not just during install
    # and config-changed, to protect against race conditions. If we don't
    # do this, then the config in the hook environment may show updates
    # to running hooks well before the config-changed hook has been invoked
    # and the initialization provided an opportunity to be run.
    hookenv.atexit(assess_status)
    reactive._ceph_log_registered = True
# Series upgrade hooks are a special case and reacting to the hook directly
# makes sense as we may not want other charm code to run
@hook('pre-series-upgrade')
def pre_series_upgrade():
    """Handler for pre-series-upgrade.

    Flags the unit as series-upgrading so ``assess_status`` reports
    'blocked' until the operator marks the upgrade complete.
    """
    # NOTE(review): kv key mentions 'vault' — looks copy-pasted from the
    # vault charm; assess_status() reads the same key so it works, but the
    # name is misleading. Confirm before renaming.
    unitdata.kv().set('charm.vault.series-upgrading', True)
@hook('post-series-upgrade')
def post_series_upgrade():
    """Handler for post-series-upgrade.

    Re-queues the focal package set when the unit has been upgraded to
    focal or later (btrfs-tools was renamed btrfs-progs), then clears the
    series-upgrading flag so the unit resumes normal status reporting.
    """
    release = lsb_release()['DISTRIB_CODENAME'].lower()
    if CompareHostReleases(release) >= 'focal':
        queue_install(PACKAGES_FOCAL)
    # NOTE(review): same misleadingly-named 'vault' kv key as in
    # pre_series_upgrade/assess_status; kept consistent here.
    unitdata.kv().set('charm.vault.series-upgrading', False)
@reactive.when_none('charm.paused', 'run-default-update-status')
@reactive.when('ceph-mds.available')
def config_changed():
    """Render configuration once the ceph-mds relation is complete.

    Also reacts to changes of the ``source`` configuration option by
    re-running install and checking whether a package upgrade is
    available (the upgrade fix this port is about).
    """
    ceph_mds = reactive.endpoint_from_flag('ceph-mds.available')
    with charm.provide_charm_instance() as cephfs_charm:
        cephfs_charm.configure_ceph_keyring(ceph_mds.mds_key())
        cephfs_charm.render_with_interfaces([ceph_mds])
        if reactive.is_flag_set('config.changed.source'):
            # update system source configuration and check for upgrade
            cephfs_charm.install()
            cephfs_charm.upgrade_if_available([ceph_mds])
            reactive.clear_flag('config.changed.source')
        reactive.set_flag('cephfs.configured')
        reactive.set_flag('config.rendered')
        cephfs_charm.assess_status()

View File

@ -1,24 +1,24 @@
[global]
auth cluster required = {{ auth_supported }}
auth service required = {{ auth_supported }}
auth client required = {{ auth_supported }}
auth cluster required = {{ ceph_mds.auth }}
auth service required = {{ ceph_mds.auth }}
auth client required = {{ ceph_mds.auth }}
keyring = /etc/ceph/$cluster.$name.keyring
mon host = {{ mon_hosts }}
fsid = {{ fsid }}
mon host = {{ ceph_mds.monitors }}
fsid = {{ ceph_mds.fsid }}
log to syslog = {{ use_syslog }}
err to syslog = {{ use_syslog }}
clog to syslog = {{ use_syslog }}
mon cluster log to syslog = {{ use_syslog }}
debug mon = {{ loglevel }}/5
debug osd = {{ loglevel }}/5
log to syslog = {{ options.use_syslog }}
err to syslog = {{ options.use_syslog }}
clog to syslog = {{ options.use_syslog }}
mon cluster log to syslog = {{ options.use_syslog }}
debug mon = {{ options.loglevel }}/5
debug osd = {{ options.loglevel }}/5
{% if ceph_public_network %}
public network = {{ ceph_public_network }}
{% if options.networks %}
public network = {{ options.networks|join(',') }}
{%- endif %}
{%- if public_addr %}
public addr = {{ public_addr }}
{%- if options.public_addr %}
public addr = {{ options.public_addr }}
{%- endif %}
[client]
@ -27,7 +27,7 @@ log file = /var/log/ceph.log
[mds]
keyring = /var/lib/ceph/mds/$cluster-$id/keyring
[mds.{{ mds_name }}]
host = {{ hostname }}
[mds.{{ options.mds_name }}]
host = {{ options.hostname }}

View File

@ -23,6 +23,7 @@ configure:
- zaza.openstack.charm_tests.keystone.setup.add_demo_user
tests:
- zaza.openstack.charm_tests.ceph.fs.tests.CephFSTests
- zaza.openstack.charm_tests.ceph.fs.tests.CharmOperationTest
tests_options:
force_deploy:
- focal-ussuri

View File

@ -3,7 +3,7 @@
# requirements management in charms via bot-control. Thank you.
#
# Lint and unit test requirements
flake8>=2.2.4,<=2.4.1
flake8>=2.2.4
stestr>=2.2.0
requests>=2.18.4
charms.reactive

View File

@ -13,30 +13,14 @@
# limitations under the License.
import sys
import mock
import unittest.mock as mock
sys.path.append('src')
sys.path.append('src/lib')
apt_pkg = mock.MagicMock()
charmhelpers = mock.MagicMock()
sys.modules['apt_pkg'] = apt_pkg
sys.modules['charmhelpers'] = charmhelpers
sys.modules['charmhelpers.core'] = charmhelpers.core
sys.modules['charmhelpers.core.hookenv'] = charmhelpers.core.hookenv
sys.modules['charmhelpers.core.host'] = charmhelpers.core.host
sys.modules['charmhelpers.core.unitdata'] = charmhelpers.core.unitdata
sys.modules['charmhelpers.core.templating'] = charmhelpers.core.templating
sys.modules['charmhelpers.contrib'] = charmhelpers.contrib
sys.modules['charmhelpers.contrib.openstack'] = charmhelpers.contrib.openstack
sys.modules['charmhelpers.contrib.openstack.utils'] = (
charmhelpers.contrib.openstack.utils)
sys.modules['charmhelpers.contrib.openstack.templating'] = (
charmhelpers.contrib.openstack.templating)
sys.modules['charmhelpers.contrib.network'] = charmhelpers.contrib.network
sys.modules['charmhelpers.contrib.network.ip'] = (
charmhelpers.contrib.network.ip)
sys.modules['charmhelpers.fetch'] = charmhelpers.fetch
sys.modules['charmhelpers.cli'] = charmhelpers.cli
sys.modules['charmhelpers.contrib.hahelpers'] = charmhelpers.contrib.hahelpers
sys.modules['charmhelpers.contrib.hahelpers.cluster'] = (
charmhelpers.contrib.hahelpers.cluster)
# Mock out charmhelpers so that we can test without it.
import charms_openstack.test_mocks # noqa
charms_openstack.test_mocks.mock_charmhelpers()
sys.modules['dns'] = mock.MagicMock()
sys.modules['dns.resolver'] = mock.MagicMock()

View File

@ -0,0 +1,82 @@
# Copyright 2020 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest.mock as mock
import charms_openstack.test_utils as test_utils
import charm.openstack.ceph_fs as ceph_fs
class TestMitakaCephFsCharm(test_utils.PatchHelper):
    """Tests for the pre-focal (mitaka) charm class."""

    def setUp(self):
        super().setUp()
        self.patch_release('mitaka')
        self.patch('socket.gethostname', name='gethostname')
        self.gethostname.return_value = 'somehost'
        self.target = ceph_fs.MitakaCephFSCharm()

    def test_packages(self):
        # Package list is the only difference between the past version and
        # future versions of this charm, see ``TestCephFsCharm`` for the rest
        # of the tests
        # FIX: use assertEqual; assertEquals is a deprecated alias
        # (removed in Python 3.12).
        self.assertEqual(self.target.packages, [
            'ceph-mds', 'gdisk', 'ntp', 'btrfs-tools', 'xfsprogs'])
class TestCephFsCharm(test_utils.PatchHelper):
    """Tests for the ussuri (focal-era) charm class and its adapters."""

    def setUp(self):
        super().setUp()
        self.patch_release('ussuri')
        self.patch('socket.gethostname', name='gethostname')
        self.gethostname.return_value = 'somehost'
        self.target = ceph_fs.UssuriCephFSCharm()

    def patch_target(self, attr, return_value=None):
        """Patch ``attr`` on the charm instance under test."""
        mocked = mock.patch.object(self.target, attr)
        self._patches[attr] = mocked
        started = mocked.start()
        started.return_value = return_value
        self._patches_start[attr] = started
        setattr(self, attr, started)

    # FIX throughout: assertEquals is a deprecated alias of assertEqual
    # (removed in Python 3.12).
    def test___init__(self):
        self.assertEqual(self.target.services, [
            'ceph-mds@somehost'])
        self.assertDictEqual(self.target.restart_map, {
            '/etc/ceph/ceph.conf': ['ceph-mds@somehost']})
        self.assertEqual(self.target.packages, [
            'ceph-mds', 'gdisk', 'ntp', 'btrfs-progs', 'xfsprogs'])

    def test_configuration_class(self):
        self.assertEqual(self.target.options.hostname, 'somehost')
        self.assertEqual(self.target.options.mds_name, 'somehost')
        self.patch_target('get_networks')
        self.get_networks.return_value = ['fakeaddress']
        self.assertEqual(self.target.options.networks, ['fakeaddress'])
        self.patch_object(ceph_fs.ch_core.hookenv, 'config')
        self.config.side_effect = lambda x: {'prefer-ipv6': False}.get(x)
        self.patch_object(ceph_fs, 'get_ipv6_addr')
        self.get_ipv6_addr.return_value = ['2001:db8::fake']
        self.patch_target('get_public_addr')
        self.get_public_addr.return_value = '192.0.2.42'
        self.assertEqual(
            self.target.options.public_addr,
            '192.0.2.42')
        self.config.side_effect = lambda x: {'prefer-ipv6': True}.get(x)
        self.assertEqual(
            self.target.options.public_addr,
            '2001:db8::fake')

View File

@ -0,0 +1,81 @@
# Copyright 2020 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest.mock as mock
import charm.openstack.ceph_fs as ceph_fs
import reactive.ceph_fs as handlers
import charms_openstack.test_utils as test_utils
class TestRegisteredHooks(test_utils.TestRegisteredHooks):

    def test_hooks(self):
        """Verify flag guards registered on the reactive handlers."""
        defaults = [
            'charm.installed',
            'config.changed',
            'config.rendered',
            'upgrade-charm',
            'update-status',
        ]
        hook_set = {
            'when': {
                'config_changed': ('ceph-mds.available',),
            },
            'when_none': {
                'config_changed': ('charm.paused',
                                   'run-default-update-status',),
            },
        }
        # test that the hooks were registered via the reactive.ceph_fs module
        self.registered_hooks_test_helper(handlers, hook_set, defaults)
class TestCephFSHandlers(test_utils.PatchHelper):

    def setUp(self):
        super().setUp()
        self.patch_release(ceph_fs.UssuriCephFSCharm.release)
        # Replace provide_charm_instance() with a context manager that
        # yields a MagicMock charm instance we can assert against.
        self.target = mock.MagicMock()
        self.patch_object(handlers.charm, 'provide_charm_instance',
                          new=mock.MagicMock())
        self.provide_charm_instance().__enter__.return_value = \
            self.target
        self.provide_charm_instance().__exit__.return_value = None

    def test_config_changed(self):
        """config_changed renders config; upgrades only on source change."""
        self.patch_object(handlers.reactive, 'endpoint_from_flag')
        self.patch_object(handlers.reactive, 'is_flag_set')
        self.patch_object(handlers.reactive, 'clear_flag')
        self.patch_object(handlers.reactive, 'set_flag')
        ceph_mds = mock.MagicMock()
        ceph_mds.mds_key.return_value = 'fakekey'
        self.endpoint_from_flag.return_value = ceph_mds
        # First pass: 'config.changed.source' not set -> no install/upgrade.
        self.is_flag_set.return_value = False
        handlers.config_changed()
        self.endpoint_from_flag.assert_called_once_with('ceph-mds.available')
        self.target.configure_ceph_keyring.assert_called_once_with('fakekey')
        self.target.render_with_interfaces.assert_called_once_with([ceph_mds])
        self.is_flag_set.assert_called_once_with('config.changed.source')
        self.set_flag.assert_has_calls([
            mock.call('cephfs.configured'),
            mock.call('config.rendered'),
        ])
        self.target.install.assert_not_called()
        self.target.upgrade_if_available.assert_not_called()
        # Second pass: flag set -> install and upgrade path runs.
        self.is_flag_set.return_value = True
        handlers.config_changed()
        self.target.install.assert_called_once_with()
        self.target.upgrade_if_available.assert_called_once_with([ceph_mds])