
This change adds the capability to rename a subcloud after bootstrap or during a subcloud rehome operation.

A field was added to the database to separate the region name from the subcloud name. The region name determines the subcloud reference in the OpenStack core, through which it is possible to access the endpoints of a given subcloud. Since the region name cannot be changed, this commit adds the ability to maintain a unique region name in UUID format, allowing a subcloud to be renamed when necessary without any endpoint impact.

The region name is randomly generated when the subcloud is created and only applies to future subclouds. For systems that already have subclouds, the region keeps its day-0 value — that is, the region retains the same name as the subcloud — but those subclouds can still be renamed. This topic involves changes to dcmanager, dcmanager-client and the GUI.

To preserve the region-name reference needed by the cert-monitor, a mechanism was created to determine whether a request is coming from the cert-monitor.

Usage for subcloud rename:
  dcmanager subcloud update <subcloud-name> --name <new-name>

Usage for subcloud rehoming:
  dcmanager subcloud add --name <subcloud-name> --migrate ...

Note: the upgrade test from StarlingX 8 -> 9 for this commit is deferred until upgrade functionality in master is restored.
Any issue found during the upgrade test will be addressed in a separate commit.

Test Plan:
PASS: Run dcmanager subcloud passing subcommands:
      - add/delete/migrate/list/show/show --detail
      - errors/manage/unmanage/reinstall/reconfig
      - update/deploy
PASS: Run dcmanager subcloud add supplying the --name parameter and validate that the operation is not allowed
PASS: Run dcmanager supplying subcommands:
      - kube/patch/prestage strategies
PASS: Run dcmanager to apply a patch and remove it
PASS: Run dcmanager subcloud-backup:
      - create/delete/restore/show/upload
PASS: Run subcloud-group:
      - add/delete/list/list-subclouds/show/update
PASS: Run dcmanager subcloud strategy for:
      - patch/kubernetes/firmware
PASS: Run the dcmanager subcloud update command passing the --name parameter with the following values:
      - current subcloud name (not changed)
      - different existing subcloud name
PASS: Run dcmanager to migrate a subcloud passing the --name parameter with a new subcloud name
PASS: Run dcmanager to migrate a subcloud without the --name parameter
PASS: Run dcmanager to migrate a subcloud passing the --name parameter with a new subcloud name and a different subcloud name in the bootstrap file
PASS: Test the dcmanager API response using the cURL command line to validate the new region name field
PASS: Run full DC sanity and regression

Story: 2010788
Task: 48217

Signed-off-by: Cristian Mondo <cristian.mondo@windriver.com>
Change-Id: Id04f42504b8e325d9ec3880c240fe4a06e3a20b7
170 lines · 6.9 KiB · Python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2017-2023 Wind River Systems, Inc.
#
# The right to copy, distribute, modify, or otherwise make use
# of this software may be licensed only pursuant to the terms
# of an applicable Wind River license agreement.
#
|
|
import functools
|
|
import six
|
|
|
|
from oslo_config import cfg
|
|
from oslo_log import log as logging
|
|
import oslo_messaging
|
|
from oslo_service import service
|
|
|
|
from dccommon import consts as dccommon_consts
|
|
|
|
from dcmanager.audit import rpcapi as dcmanager_audit_rpc_client
|
|
from dcmanager.common import consts
|
|
from dcmanager.common import context
|
|
from dcmanager.common import exceptions
|
|
from dcmanager.common.i18n import _
|
|
from dcmanager.common import messaging as rpc_messaging
|
|
from dcmanager.common import utils
|
|
from dcmanager.state.subcloud_state_manager import SubcloudStateManager
|
|
|
|
# Module-level logger for this service (oslo.log).
LOG = logging.getLogger(__name__)
def request_context(func):
    """Decorate an RPC endpoint method to normalize its context argument.

    Ensures the second positional argument is a ``context.RequestContext``
    instance (rebuilding it from a dict-convertible object when needed), and
    converts ``DCManagerException`` errors into oslo.messaging
    ``ExpectedException`` so they are reported to the RPC caller cleanly.
    """
    @functools.wraps(func)
    def _wrapper(self, ctx, *args, **kwargs):
        # Rehydrate a foreign context object into a RequestContext; leave
        # None and already-correct instances untouched.
        needs_conversion = (ctx is not None
                            and not isinstance(ctx, context.RequestContext))
        if needs_conversion:
            ctx = context.RequestContext.from_dict(ctx.to_dict())
        try:
            return func(self, ctx, *args, **kwargs)
        except exceptions.DCManagerException:
            # Known dcmanager errors are "expected": wrap them so the RPC
            # layer forwards them without a server-side traceback.
            raise oslo_messaging.rpc.dispatcher.ExpectedException()

    return _wrapper
class DCManagerStateService(service.Service):
    """Lifecycle manager for a running service.

    - All the methods in here are called from the RPC client.
    - If a RPC call does not have a corresponding method here, an exception
      will be thrown.
    - Arguments to these calls are added dynamically and will be treated as
      keyword arguments by the RPC client.
    """

    def __init__(self, host):
        super(DCManagerStateService, self).__init__()
        # NOTE(review): the 'host' argument is ignored in favor of the
        # configured host -- confirm this is intentional before changing it.
        self.host = cfg.CONF.host
        self.rpc_api_version = consts.RPC_API_VERSION
        self.topic = consts.TOPIC_DC_MANAGER_STATE
        # The following are initialized here, but assigned in start() which
        # happens after the fork when spawning multiple worker processes
        self.engine_id = None
        self.target = None
        self._rpc_server = None
        self.subcloud_state_manager = None
        self.audit_rpc_client = None

    def _init_managers(self):
        # Created post-fork (from start()) so each worker owns its manager.
        self.subcloud_state_manager = SubcloudStateManager()

    def start(self):
        """Start the RPC server and set up the audit notification client."""
        LOG.info("Starting %s", self.__class__.__name__)
        utils.set_open_file_limit(cfg.CONF.worker_rlimit_nofile)
        self._init_managers()
        target = oslo_messaging.Target(version=self.rpc_api_version,
                                       server=self.host,
                                       topic=self.topic)
        self.target = target
        self._rpc_server = rpc_messaging.get_rpc_server(self.target, self)
        self._rpc_server.start()
        # Used to notify dcmanager-audit
        self.audit_rpc_client = dcmanager_audit_rpc_client.ManagerAuditClient()

        super(DCManagerStateService, self).start()

    def _stop_rpc_server(self):
        # Stop RPC connection to prevent new requests
        LOG.debug(_("Attempting to stop engine service..."))
        try:
            self._rpc_server.stop()
            self._rpc_server.wait()
            LOG.info('Engine service stopped successfully')
        except Exception as ex:
            # Best-effort shutdown: log and continue rather than crash stop().
            LOG.error('Failed to stop engine service: %s',
                      six.text_type(ex))

    def stop(self):
        """Stop the RPC server and terminate the engine process."""
        LOG.info("Stopping %s", self.__class__.__name__)
        self._stop_rpc_server()
        # Terminate the engine process
        LOG.info("All threads were gone, terminating engine")
        super(DCManagerStateService, self).stop()

    @request_context
    def update_subcloud_endpoint_status(self, context, subcloud_name=None,
                                        subcloud_region=None,
                                        endpoint_type=None,
                                        sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
                                        alarmable=True,
                                        ignore_endpoints=None):
        """Update a subcloud endpoint sync status.

        :param context: request context
        :param subcloud_name: subcloud name (used for logging only here)
        :param subcloud_region: region name identifying the subcloud
        :param endpoint_type: endpoint whose status is being updated
        :param sync_status: new sync status value
        :param alarmable: whether an alarm may be raised for this update
        :param ignore_endpoints: endpoints to skip during the update
        """
        # Lazy %-style arguments: formatting is skipped when the message is
        # filtered out by log level.
        LOG.info("Handling update_subcloud_endpoint_status request for "
                 "subcloud: (%s) endpoint: (%s) status:(%s) ",
                 subcloud_name, endpoint_type, sync_status)

        self.subcloud_state_manager.update_subcloud_endpoint_status(
            context,
            subcloud_region,
            endpoint_type,
            sync_status,
            alarmable,
            ignore_endpoints)

        # If a sync status is being set to unknown, trigger the matching
        # audit so it can refresh the status ASAP instead of waiting for
        # the next periodic run.
        if sync_status == dccommon_consts.SYNC_STATUS_UNKNOWN:
            audit_triggers = {
                dccommon_consts.ENDPOINT_TYPE_PATCHING:
                    self.audit_rpc_client.trigger_patch_audit,
                dccommon_consts.ENDPOINT_TYPE_FIRMWARE:
                    self.audit_rpc_client.trigger_firmware_audit,
                dccommon_consts.ENDPOINT_TYPE_KUBERNETES:
                    self.audit_rpc_client.trigger_kubernetes_audit,
            }
            trigger_audit = audit_triggers.get(endpoint_type)
            if trigger_audit is not None:
                trigger_audit(context)

    @request_context
    def update_subcloud_availability(self, context,
                                     subcloud_name,
                                     subcloud_region,
                                     availability_status,
                                     update_state_only=False,
                                     audit_fail_count=None):
        """Update a subcloud's availability status.

        :param context: request context
        :param subcloud_name: subcloud name (used for logging only here)
        :param subcloud_region: region name identifying the subcloud
        :param availability_status: new availability status value
        :param update_state_only: update the state without side effects
        :param audit_fail_count: consecutive audit failure count, if any
        """
        LOG.info("Handling update_subcloud_availability request for: %s",
                 subcloud_name)
        self.subcloud_state_manager.update_subcloud_availability(
            context,
            subcloud_region,
            availability_status,
            update_state_only,
            audit_fail_count)