Implement key rotation for ceph-fs
This patchset implements key rotation for the ceph-fs charm by receiving the new pending key from the ceph-mon charm and rotating it manually via Ceph's authtool. It uses the 'ceph-mds-relation-changed' hook for this.

Change-Id: I773f389f56d78cd7ce58f9f2b5e7d7695164acb1
func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/1202
parent b852fa02bd
commit 8655c1b81a
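The rotation flow described in the commit message can be pictured as a small standalone helper: take the pending key handed over by ceph-mon, write it into the local MDS keyring with ceph-authtool, and restart the daemon. The sketch below is illustrative only; the function name and arguments (rotate_mds_key, host, pending_key) are placeholders, and the charm performs these steps through configure_ceph_keyring and the handler shown in the diff below rather than through this helper.

import subprocess


def rotate_mds_key(host, pending_key):
    # Illustrative sketch, not charm code: write the new secret for the
    # mds.<host> entity into the unit's keyring file.
    keyring = '/var/lib/ceph/mds/ceph-%s/keyring' % host
    subprocess.check_call([
        'sudo', 'ceph-authtool', keyring,
        '--name', 'mds.%s' % host,
        '--add-key', pending_key,
    ])
    # Restart the daemon so it re-authenticates with the rotated key.
    subprocess.check_call(
        ['sudo', 'systemctl', 'restart', 'ceph-mds@%s.service' % host])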
@@ -2,7 +2,6 @@
    templates:
      - charm-unit-jobs-py38
      - charm-unit-jobs-py39
      - charm-xena-functional-jobs
      - charm-yoga-functional-jobs
      - charm-functional-jobs
    vars:
@@ -1,4 +1,4 @@
# Copyright 2016 Canonical Ltd
# Copyright 2024 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -23,6 +23,9 @@ from charmhelpers.core.hookenv import (
import charms_openstack.bus
import charms_openstack.charm as charm

import os
import subprocess


charms_openstack.bus.discover()
@@ -41,6 +44,9 @@ charm.use_defaults(
def config_changed():
    ceph_mds = reactive.endpoint_from_flag('ceph-mds.pools.available')
    with charm.provide_charm_instance() as cephfs_charm:
        host = cephfs_charm.hostname
        exists = os.path.exists('/var/lib/ceph/mds/ceph-%s/keyring' % host)

        cephfs_charm.configure_ceph_keyring(ceph_mds.mds_key())
        cephfs_charm.render_with_interfaces([ceph_mds])
        if reactive.is_flag_set('config.changed.source'):
@@ -52,6 +58,22 @@ def config_changed():
        reactive.set_flag('config.rendered')
        cephfs_charm.assess_status()

        # If the keyring file existed before this call, then the new
        # provided key implies a rotation.
        if exists:
            svc = 'ceph-mds@%s.service' % host
            try:
                # Reset the failure count first, as the service may fail
                # to come up due to the way the restart-map is handled.
                subprocess.check_call(['sudo', 'systemctl',
                                       'reset-failed', svc])
                subprocess.check_call(['sudo', 'systemctl', 'restart', svc])
            except subprocess.CalledProcessError as exc:
                # The service can be temporarily masked when booting, so
                # skip that class of errors.
                ch_core.hookenv.log('Failed to restart MDS service: %s' %
                                    str(exc))


@reactive.when('ceph-mds.connected')
def storage_ceph_connected(ceph):
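As a follow-up check, illustrative only and not part of this patch, an operator could confirm on the unit that the rotation took effect by comparing the key stored in the local MDS keyring against the key the monitors currently hold. The helper name (mds_key_in_sync) is a placeholder and the commands assume passwordless sudo on the unit.

import subprocess


def mds_key_in_sync(host):
    # Key as stored in the unit's local keyring file.
    keyring = '/var/lib/ceph/mds/ceph-%s/keyring' % host
    local = subprocess.check_output(
        ['sudo', 'ceph-authtool', keyring,
         '--print-key', '--name', 'mds.%s' % host]).strip()
    # Key currently registered with the monitors for the same entity.
    cluster = subprocess.check_output(
        ['sudo', 'ceph', 'auth', 'get-key', 'mds.%s' % host]).strip()
    return local == cluster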
@@ -1,45 +0,0 @@
variables:
  openstack-origin: &openstack-origin cloud:focal-xena

local_overlay_enabled: False

series: &series focal

applications:
  ubuntu: # used to test mounts
    charm: ch:ubuntu
    num_units: 2
  ceph-fs:
    charm: ../../../ceph-fs.charm
    num_units: 1
    options:
      source: *openstack-origin
      pool-type: erasure-coded
      ec-profile-k: 4
      ec-profile-m: 2

  ceph-osd:
    charm: ch:ceph-osd
    num_units: 6
    storage:
      osd-devices: 'cinder,10G'
    options:
      osd-devices: '/dev/test-non-existent'
      source: *openstack-origin
    channel: latest/edge

  ceph-mon:
    charm: ch:ceph-mon
    num_units: 3
    options:
      monitor-count: '3'
      source: *openstack-origin
    channel: latest/edge

relations:
  - - 'ceph-mon:mds'
    - 'ceph-fs:ceph-mds'

  - - 'ceph-osd:mon'
    - 'ceph-mon:osd'
@@ -17,6 +17,7 @@ tests:
  - zaza.openstack.charm_tests.ceph.fs.tests.CephFSTests
  - zaza.openstack.charm_tests.ceph.fs.tests.CharmOperationTest
  - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation
  - zaza.openstack.charm_tests.ceph.tests.CephMonKeyRotationTests

target_deploy_status:
  ubuntu:
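The new CephMonKeyRotationTests entry exercises key rotation against this deployment. As a rough illustration only (this is not the zaza suite's actual code, and the 'rotate-key' action name and its 'entity' parameter are assumptions not confirmed by this diff), triggering a rotation from test code might look like:

import zaza.model


def trigger_mds_key_rotation(mds_hostname):
    # Hypothetical: ask the ceph-mon leader to rotate the key for the MDS
    # entity; the action name and parameter are assumptions for illustration.
    zaza.model.run_action_on_leader(
        'ceph-mon', 'rotate-key',
        action_params={'entity': 'mds.%s' % mds_hostname})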
@@ -61,6 +61,8 @@ class TestCephFSHandlers(test_utils.PatchHelper):
        self.patch_object(handlers.reactive, 'is_flag_set')
        self.patch_object(handlers.reactive, 'clear_flag')
        self.patch_object(handlers.reactive, 'set_flag')
        self.patch_object(handlers.os.path, 'exists')
        handlers.os.path.exists.return_value = False
        ceph_mds = mock.MagicMock()
        ceph_mds.mds_key.return_value = 'fakekey'
        self.endpoint_from_flag.return_value = ceph_mds
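The setUp changes above only cover the fresh-install path (os.path.exists returns False). A complementary test for the rotation branch could look roughly like the sketch below. It is illustrative only, not part of this patch, and assumes the surrounding setUp already patches endpoint_from_flag and provide_charm_instance the way the existing config_changed tests do.

    def test_config_changed_rotates_key(self):
        # Pretend the keyring already existed so the rotation branch runs.
        self.patch_object(handlers.subprocess, 'check_call')
        handlers.os.path.exists.return_value = True
        handlers.config_changed()
        # The handler should reset the failure count and restart the MDS.
        handlers.subprocess.check_call.assert_any_call(
            ['sudo', 'systemctl', 'reset-failed', mock.ANY])
        handlers.subprocess.check_call.assert_any_call(
            ['sudo', 'systemctl', 'restart', mock.ANY])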