
System peer monitoring and handling:
Performs peer health checks by querying the peer group list. When a
failure is detected and the number of consecutive heartbeat failures
reaches the threshold, an alarm is raised. When the connection comes
back online, the alarm is cleared and an audit is performed on the
peer groups associated with the system peer.

Subcloud peer group audit and handling:
- If the remote peer group's migration_status is 'migrating', unmanage
  the subclouds of the local peer group.
- If the remote peer group's migration_status is 'complete', compare
  the subclouds on both ends. If the remote subcloud is
  'managed+online', set the local subcloud with the same region_name
  to 'unmanaged+secondary'.
- If the remote peer group's migration_status is 'none', set the
  migration_status of the local peer group to 'none' as well.

Batch rehome update:
When Subcloud Peer Group based batch rehoming is performed, check
whether the associated System Peer is alive:
- If it is, and the remote subcloud with the same region_name is
  managed on the peer site, unmanage it there before rehoming it to
  the local site.
- If it is not, the peer site is no longer available for subcloud
  management, so rehoming can be performed directly.
If the subcloud peer group priority is 0, attempt to clear the
FM_ALARM_ID_DC_SUBCLOUD_PEER_GROUP_NOT_MANAGED alarm after batch
rehoming completes.

Security enhancement:
Base64 encode/decode admin_password when saving/loading rehome_data
to/from the DB (see the sketch below).

Test Plan:
1. PASS - Add a system peer on DC0, then unplug the OAM network
   between DC0 and DC1. The alarm is raised.
2. PASS - Reconnect the OAM network between DC0 and DC1. The previous
   alarm is cleared.
3. PASS - Add a subcloud peer group on DC0 and add two subclouds on
   DC0 under that peer group; they should be in managed, online and
   complete states. Add a system peer on DC0 pointing to DC1, and add
   a peer-group-association associating the peer group and the system
   peer above on DC0. Create another association on DC1, for the DC0
   system peer and the peer group synced to DC1. Shut down DC0 and
   perform the migrate operation on the subcloud peer group from DC1;
   wait until all subclouds have been migrated and entered online
   managed states. Power on DC0 and check that an alarm like "Subcloud
   peer group xxx is managed by remote system (peer_uuid=xxx) with
   lower priority." has been raised. Check the subcloud state on DC0;
   it should be 'secondary'. Perform the migrate operation on the
   subcloud peer group from DC0. After all subclouds have been
   migrated and entered online managed states, the alarm is cleared.
   Check the subclouds on DC1; their state should be secondary.
4. PASS - Migrate a subcloud peer group with priority 0. Before the
   migration, check that the
   FM_ALARM_ID_DC_SUBCLOUD_PEER_GROUP_NOT_MANAGED alarm exists. After
   the migration is done, check that the alarm is cleared.
5. PASS - When the remote peer group's migration_status is 'migrating'
   (i.e. DC0 comes back online while rehoming is still in progress),
   DC0's subclouds are automatically set to 'unmanaged'; after DC1's
   migration is complete and all subclouds are 'managed+online',
   DC0's subclouds are automatically set to 'secondary'.

Story: 2010852
Task: 48483
Task: 48509
Task: 48819

Change-Id: Ic97e7c4a7628445522adfba4b0b2e0cc945cbe22
Signed-off-by: Wang Tao <tao.wang@windriver.com>
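A minimal sketch of the "Security enhancement" item above (the helper
names are illustrative, not the actual dcmanager code; it assumes
admin_password is stored as a UTF-8 string inside rehome_data):

    import base64

    def encode_admin_password(password: str) -> str:
        # Encode before saving rehome_data to the DB.
        return base64.b64encode(password.encode('utf-8')).decode('utf-8')

    def decode_admin_password(encoded: str) -> str:
        # Decode after loading rehome_data from the DB.
        return base64.b64decode(encoded.encode('utf-8')).decode('utf-8')

Note this is obfuscation, not encryption: it keeps the plaintext
password out of casual DB dumps but offers no cryptographic protection.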
# Copyright (c) 2017-2023 Wind River Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""
Client side of the DC Manager RPC API.
"""

from oslo_log import log as logging

from dccommon import consts as dccommon_consts
from dcmanager.common import consts
from dcmanager.common import messaging

LOG = logging.getLogger(__name__)

class RPCClient(object):
    """RPC client

    Basic RPC client implementation to deliver RPC 'call' and 'cast'
    """

    def __init__(self, timeout, topic, version):
        self._client = messaging.get_rpc_client(timeout=timeout, topic=topic,
                                                version=version)

    @staticmethod
    def make_msg(method, **kwargs):
        return method, kwargs

    def call(self, ctxt, msg, version=None):
        method, kwargs = msg
        if version is not None:
            client = self._client.prepare(version=version)
        else:
            client = self._client
        return client.call(ctxt, method, **kwargs)

    def cast(self, ctxt, msg, fanout=None, version=None):
        method, kwargs = msg
        if fanout or version:
            client = self._client.prepare(fanout=fanout, version=version)
        else:
            client = self._client
        return client.cast(ctxt, method, **kwargs)

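# Illustrative usage of the wrapper above (the topic and method names here
# are assumptions for the example, not actual dcmanager RPC endpoints):
#
#     client = RPCClient(timeout=60, topic='example-topic', version='1.0')
#     msg = client.make_msg('do_something', subcloud_id=1)
#     result = client.call(ctxt, msg)  # synchronous: waits for the reply
#     client.cast(ctxt, msg)           # asynchronous: returns immediately
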
class SubcloudStateClient(RPCClient):
    """Client to update subcloud availability."""

    BASE_RPC_API_VERSION = '1.0'

    def __init__(self, timeout=None):
        super(SubcloudStateClient, self).__init__(
            timeout,
            consts.TOPIC_DC_MANAGER_STATE,
            self.BASE_RPC_API_VERSION)

    def update_subcloud_availability(self, ctxt,
                                     subcloud_name,
                                     subcloud_region,
                                     availability_status,
                                     update_state_only=False,
                                     audit_fail_count=None):
        # Note: synchronous
        return self.call(
            ctxt,
            self.make_msg('update_subcloud_availability',
                          subcloud_name=subcloud_name,
                          subcloud_region=subcloud_region,
                          availability_status=availability_status,
                          update_state_only=update_state_only,
                          audit_fail_count=audit_fail_count))

    def update_subcloud_endpoint_status(
            self, ctxt, subcloud_name=None,
            subcloud_region=None,
            endpoint_type=None,
            sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
            ignore_endpoints=None,
            alarmable=True):
        # Note: This is an asynchronous operation.
        # See below for synchronous method call
        return self.cast(ctxt, self.make_msg('update_subcloud_endpoint_status',
                                             subcloud_name=subcloud_name,
                                             subcloud_region=subcloud_region,
                                             endpoint_type=endpoint_type,
                                             sync_status=sync_status,
                                             ignore_endpoints=ignore_endpoints,
                                             alarmable=alarmable))

    def update_subcloud_endpoint_status_sync(
            self, ctxt, subcloud_name=None,
            subcloud_region=None,
            endpoint_type=None,
            sync_status=dccommon_consts.SYNC_STATUS_OUT_OF_SYNC,
            ignore_endpoints=None,
            alarmable=True):
        # Note: synchronous
        return self.call(ctxt, self.make_msg('update_subcloud_endpoint_status',
                                             subcloud_name=subcloud_name,
                                             subcloud_region=subcloud_region,
                                             endpoint_type=endpoint_type,
                                             sync_status=sync_status,
                                             ignore_endpoints=ignore_endpoints,
                                             alarmable=alarmable))

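# Illustrative usage (assumes a valid request context `ctxt`; treat the
# exact availability constant name as an assumption about dccommon_consts):
#
#     state_client = SubcloudStateClient()
#     state_client.update_subcloud_availability(
#         ctxt, 'subcloud1', 'subcloud1-region',
#         dccommon_consts.AVAILABILITY_ONLINE)
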
class ManagerClient(RPCClient):
    """Client side of the DC Manager rpc API.

    Version History:
     1.0 - Initial version (Mitaka 1.0 release)
    """

    BASE_RPC_API_VERSION = '1.0'

    def __init__(self, timeout=None):
        super(ManagerClient, self).__init__(
            timeout,
            consts.TOPIC_DC_MANAGER,
            self.BASE_RPC_API_VERSION)

    def add_subcloud(self, ctxt, subcloud_id, payload):
        return self.cast(ctxt, self.make_msg('add_subcloud',
                                             subcloud_id=subcloud_id,
                                             payload=payload))

    def add_secondary_subcloud(self, ctxt, subcloud_id, payload):
        return self.call(ctxt, self.make_msg('add_subcloud',
                                             subcloud_id=subcloud_id,
                                             payload=payload))

    def delete_subcloud(self, ctxt, subcloud_id):
        return self.call(ctxt, self.make_msg('delete_subcloud',
                                             subcloud_id=subcloud_id))

    def rename_subcloud(self, ctxt, subcloud_id, curr_subcloud_name,
                        new_subcloud_name=None):
        return self.call(ctxt, self.make_msg('rename_subcloud',
                                             subcloud_id=subcloud_id,
                                             curr_subcloud_name=curr_subcloud_name,
                                             new_subcloud_name=new_subcloud_name))

    def update_subcloud(self, ctxt, subcloud_id, management_state=None,
                        description=None, location=None, group_id=None,
                        data_install=None, force=None,
                        deploy_status=None, peer_group_id=None,
                        bootstrap_values=None, bootstrap_address=None):
        return self.call(ctxt, self.make_msg('update_subcloud',
                                             subcloud_id=subcloud_id,
                                             management_state=management_state,
                                             description=description,
                                             location=location,
                                             group_id=group_id,
                                             data_install=data_install,
                                             force=force,
                                             deploy_status=deploy_status,
                                             peer_group_id=peer_group_id,
                                             bootstrap_values=bootstrap_values,
                                             bootstrap_address=bootstrap_address))

    def update_subcloud_with_network_reconfig(self, ctxt, subcloud_id, payload):
        return self.cast(ctxt,
                         self.make_msg('update_subcloud_with_network_reconfig',
                                       subcloud_id=subcloud_id,
                                       payload=payload))

    def redeploy_subcloud(self, ctxt, subcloud_id, payload):
        return self.cast(ctxt, self.make_msg('redeploy_subcloud',
                                             subcloud_id=subcloud_id,
                                             payload=payload))

    def backup_subclouds(self, ctxt, payload):
        return self.cast(ctxt, self.make_msg('backup_subclouds',
                                             payload=payload))

    def delete_subcloud_backups(self, ctxt, release_version, payload):
        return self.call(ctxt, self.make_msg('delete_subcloud_backups',
                                             release_version=release_version,
                                             payload=payload))

    def restore_subcloud_backups(self, ctxt, payload):
        return self.cast(ctxt, self.make_msg('restore_subcloud_backups',
                                             payload=payload))

    def update_subcloud_sync_endpoint_type(self, ctxt,
                                           subcloud_region,
                                           endpoint_type_list,
                                           openstack_installed):
        return self.cast(
            ctxt,
            self.make_msg('update_subcloud_sync_endpoint_type',
                          subcloud_region=subcloud_region,
                          endpoint_type_list=endpoint_type_list,
                          openstack_installed=openstack_installed))

    def prestage_subcloud(self, ctxt, payload):
        return self.call(ctxt, self.make_msg('prestage_subcloud',
                                             payload=payload))

    def subcloud_deploy_create(self, ctxt, subcloud_id, payload):
        return self.call(ctxt, self.make_msg('subcloud_deploy_create',
                                             subcloud_id=subcloud_id,
                                             payload=payload))

    def subcloud_deploy_install(self, ctxt, subcloud_id, payload,
                                initial_deployment):
        return self.cast(ctxt, self.make_msg('subcloud_deploy_install',
                                             subcloud_id=subcloud_id,
                                             payload=payload,
                                             initial_deployment=initial_deployment))

    def subcloud_deploy_bootstrap(self, ctxt, subcloud_id, payload,
                                  initial_deployment):
        return self.cast(ctxt, self.make_msg('subcloud_deploy_bootstrap',
                                             subcloud_id=subcloud_id,
                                             payload=payload,
                                             initial_deployment=initial_deployment))

    def subcloud_deploy_config(self, ctxt, subcloud_id, payload,
                               initial_deployment):
        return self.cast(ctxt, self.make_msg('subcloud_deploy_config',
                                             subcloud_id=subcloud_id,
                                             payload=payload,
                                             initial_deployment=initial_deployment))

    def subcloud_deploy_complete(self, ctxt, subcloud_id):
        return self.call(ctxt, self.make_msg('subcloud_deploy_complete',
                                             subcloud_id=subcloud_id))

    def subcloud_deploy_abort(self, ctxt, subcloud_id, deploy_status):
        return self.cast(ctxt, self.make_msg('subcloud_deploy_abort',
                                             subcloud_id=subcloud_id,
                                             deploy_status=deploy_status))

    def subcloud_deploy_resume(self, ctxt, subcloud_id, subcloud_name,
                               payload, deploy_states_to_run):
        return self.cast(ctxt, self.make_msg('subcloud_deploy_resume',
                                             subcloud_id=subcloud_id,
                                             subcloud_name=subcloud_name,
                                             payload=payload,
                                             deploy_states_to_run=deploy_states_to_run))

    def migrate_subcloud(self, ctxt, subcloud_ref, payload):
        return self.cast(ctxt, self.make_msg('migrate_subcloud',
                                             subcloud_ref=subcloud_ref,
                                             payload=payload))

    def get_subcloud_name_by_region_name(self, ctxt, subcloud_region):
        return self.call(ctxt,
                         self.make_msg('get_subcloud_name_by_region_name',
                                       subcloud_region=subcloud_region))

    def batch_migrate_subcloud(self, ctxt, payload):
        return self.cast(ctxt, self.make_msg('batch_migrate_subcloud',
                                             payload=payload))

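    # Illustrative trigger for the batch rehome flow described in the commit
    # message (the payload keys shown are assumptions about its shape, not
    # the confirmed format):
    #
    #     mgr_client = ManagerClient()
    #     mgr_client.batch_migrate_subcloud(
    #         ctxt, payload={'peer_group': 'peer-group-1',
    #                        'sysadmin_password': '<base64-encoded>'})
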
    def sync_subcloud_peer_group(self, ctxt, association_id):
        return self.cast(ctxt, self.make_msg(
            'sync_subcloud_peer_group', association_id=association_id))

    def update_subcloud_peer_group(self, ctxt, association_id, priority):
        return self.call(ctxt, self.make_msg(
            'sync_subcloud_peer_group', association_id=association_id,
            sync_subclouds=False, priority=priority))

    def delete_peer_group_association(self, ctxt, association_id):
        return self.call(ctxt, self.make_msg('delete_peer_group_association',
                                             association_id=association_id))

    def peer_monitor_notify(self, ctxt):
        return self.call(ctxt, self.make_msg('peer_monitor_notify'))

    def peer_group_audit_notify(self, ctxt, peer_group_name, payload):
        return self.call(ctxt, self.make_msg('peer_group_audit_notify',
                                             peer_group_name=peer_group_name,
                                             payload=payload))

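# Illustrative peer group audit notification for the handling described in
# the commit message (the payload keys shown are assumptions about its
# shape, not the confirmed wire format):
#
#     mgr_client = ManagerClient()
#     mgr_client.peer_group_audit_notify(
#         ctxt, 'peer-group-1',
#         payload={'peer_uuid': '<uuid>', 'migration_status': 'complete'})
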
class DCManagerNotifications(RPCClient):
    """DC Manager Notification interface to broadcast subcloud state changes

    Version History:
       1.0 - Initial version
    """
    DCMANAGER_RPC_API_VERSION = '1.0'
    TOPIC_DC_NOTIFICIATION = 'DCMANAGER-NOTIFICATION'

    def __init__(self, timeout=None):
        super(DCManagerNotifications, self).__init__(
            timeout,
            self.TOPIC_DC_NOTIFICIATION,
            self.DCMANAGER_RPC_API_VERSION)

    def subcloud_online(self, ctxt, subcloud_name):
        return self.cast(ctxt, self.make_msg('subcloud_online',
                                             subcloud_name=subcloud_name))

    def subcloud_managed(self, ctxt, subcloud_name):
        return self.cast(ctxt, self.make_msg('subcloud_managed',
                                             subcloud_name=subcloud_name))

    def subcloud_sysinv_endpoint_update(self, ctxt, subcloud_name, endpoint):
        return self.cast(ctxt, self.make_msg(
            'subcloud_sysinv_endpoint_update', subcloud_name=subcloud_name,
            endpoint=endpoint), fanout=True, version=self.DCMANAGER_RPC_API_VERSION)
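
# Illustrative fanout broadcast (fanout=True delivers the notification to
# every consumer listening on the topic; the endpoint URL is an assumption
# for the example):
#
#     notifier = DCManagerNotifications()
#     notifier.subcloud_sysinv_endpoint_update(
#         ctxt, 'subcloud1', 'https://192.168.101.2:6386/v1')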