Remove RPC server from ceph-manager

These RPC methods were called by sysinv. However, due to the changes
in [1] and [2], they no longer have any effect:
- get_primary_tier_size: always returns 0.
- get_tiers_size: always returns {}.
- is_cluster_up: this RPC is not called by sysinv.

Moreover, as mentioned in [2], sysinv still contains code marked with
"TODO(CephPoolsDecouple)" that is no longer in use and can be removed,
including the methods that call the ceph-manager RPC methods.

Therefore, sysinv no longer needs to call these RPC methods, and the
RPC server can be removed from ceph-manager.
The change proposed in [3] replaces these RPC calls with their
respective current return values, as sketched below.
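
For illustration only, a minimal sketch of the substitution [3] makes
on the sysinv side; the standalone wrapper functions are hypothetical,
but the returned values match the endpoint behavior described above:

    # Hypothetical stand-ins for the removed RPC round-trips, mirroring
    # the substitution proposed in [3]: each former call site simply
    # uses the value the ceph-manager endpoint would always return.

    def get_primary_tier_size():
        # The endpoint always resolved to 0 after [1].
        return 0

    def get_tiers_size():
        # The endpoint always resolved to an empty dict after [1].
        return {}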

Story: 2010087
Task: 46520

Test Plan:
PASS: AIO-SX: manually replaced the files from this change and [3]
on a Debian installation; no crashes occurred.
PASS: Rebuilt the whole system with this change and the [3]
modifications applied; no crashes occurred.
PASS: Listed the RabbitMQ queues and confirmed that no
sysinv.ceph_manager queues remain (see the sketch after this list).
PASS: Configured a Ceph storage backend with no errors.
PASS: Checked that no errors appear in ceph-manager.log or sysinv.log.
PASS: Confirmed that no alarms are raised.
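
A minimal sketch of the RabbitMQ queue check above, assuming
rabbitmqctl is available on the controller and run with sufficient
privileges:

    import subprocess

    # List all queue names known to the broker; after this change no
    # queue derived from the 'sysinv.ceph_manager' topic should remain.
    out = subprocess.run(
        ["rabbitmqctl", "list_queues", "name"],
        capture_output=True, text=True, check=True,
    ).stdout

    assert "sysinv.ceph_manager" not in out, "stale ceph-manager queue"
    print("PASS: no sysinv.ceph_manager queues listed")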


[1] https://review.opendev.org/c/starlingx/utilities/+/820933
[2] https://review.opendev.org/c/starlingx/config/+/620448
[3] https://review.opendev.org/c/starlingx/config/+/860690

Signed-off-by: Alyson Deives Pereira <alyson.deivespereira@windriver.com>
Change-Id: I30bedf6c46519e0dab89cffed821063b0ff67bd6
Author: Alyson Deives Pereira
Date: 2022-10-06 10:20:30 -03:00
parent a5fe044d5d
commit 190e824490
2 changed files with 2 additions and 78 deletions


@@ -62,10 +62,6 @@ REPAIR_ACTION_MAJOR_CRITICAL_ALARM = _(
    'If problem persists, contact next level of support.')
REPAIR_ACTION = _('If problem persists, contact next level of support.')
SYSINV_CONDUCTOR_TOPIC = 'sysinv.conductor_manager'
CEPH_MANAGER_TOPIC = 'sysinv.ceph_manager'
SYSINV_CONFIG_FILE = '/etc/sysinv/sysinv.conf'
# Titanium Cloud version strings
TITANIUM_SERVER_VERSION_18_03 = '18.03'


@@ -44,51 +44,6 @@ CONF.logging_default_format_string = (
logging.register_options(CONF)
logging.setup(CONF, __name__)
LOG = logging.getLogger(__name__)
CONF.rpc_backend = 'rabbit'

class RpcEndpoint(PeriodicTasks):

    def __init__(self, service=None):
        self.service = service

    def get_primary_tier_size(self, _):
        """Get the ceph size for the primary tier.

        returns: an int for the size (in GB) of the tier
        """
        tiers_size = self.service.monitor.tiers_size
        primary_tier_size = tiers_size.get(
            self.service.monitor.primary_tier_name, 0)
        LOG.debug(_LI("Ceph cluster primary tier size: %s GB") %
                  str(primary_tier_size))
        return primary_tier_size

    def get_tiers_size(self, _):
        """Get the ceph cluster tier sizes.

        returns: a dict of sizes (in GB) by tier name
        """
        tiers_size = self.service.monitor.tiers_size
        LOG.debug(_LI("Ceph cluster tiers (size in GB): %s") %
                  str(tiers_size))
        return tiers_size

    def is_cluster_up(self, _):
        """Report if the last health check was successful.

        This is an independent view of the cluster accessibility that can be
        used by the sysinv conductor to gate ceph API calls which would
        timeout and potentially block other operations.

        This view is only updated at the rate the monitor checks for a
        cluster uuid or a health check (CEPH_HEALTH_CHECK_INTERVAL)

        returns: boolean True if last health check was successful else False
        """
        return self.service.monitor.cluster_is_up

class Service(service.Service):
@@ -96,7 +51,6 @@ class Service(service.Service):

    def __init__(self, conf):
        super(Service, self).__init__()
        self.conf = conf
        self.rpc_server = None
        self.ceph_api = None
        self.entity_instance_id = ''
        self.fm_api = fm_api.FaultAPIs()
@@ -107,38 +61,12 @@ class Service(service.Service):

    def start(self):
        super(Service, self).start()

        # pylint: disable=protected-access
        sysinv_conf = self.conf._namespace._normalized[0]['DEFAULT']
        url = "rabbit://{user}:{password}@{host}:{port}"\
              "".format(user=sysinv_conf['rabbit_userid'][0],
                        password=sysinv_conf['rabbit_password'][0],
                        host=utils.ipv6_bracketed(
                            sysinv_conf['rabbit_host'][0]),
                        port=sysinv_conf['rabbit_port'][0])
        transport = messaging.get_transport(self.conf, url=url)

        self.ceph_api = wrapper.CephWrapper(
            endpoint='http://localhost:{}'.format(constants.CEPH_MGR_PORT))

        # Get initial config from sysinv and send it to
        # services that need it before starting them
        self.rpc_server = messaging.get_rpc_server(
            transport,
            messaging.Target(topic=constants.CEPH_MANAGER_TOPIC,
                             server=self.conf.sysinv_api_bind_ip),
            [RpcEndpoint(self)],
            executor='eventlet')
        self.rpc_server.start()
        eventlet.spawn_n(self.monitor.run)

    def stop(self):
        try:
            self.rpc_server.stop()
            self.rpc_server.wait()
        except Exception:
            pass
        super(Service, self).stop()

    def stop(self, graceful=False):
        super(Service, self).stop(graceful)

def run_service():