diff --git a/api-ref/source/api-ref-sysinv-v1-config.rst b/api-ref/source/api-ref-sysinv-v1-config.rst index af69f4879a..1718d3ed0f 100644 --- a/api-ref/source/api-ref-sysinv-v1-config.rst +++ b/api-ref/source/api-ref-sysinv-v1-config.rst @@ -266,16 +266,6 @@ itemNotFound (404) "rel": "bookmark" } ], - "upgrade": [ - { - "href": "http://10.10.10.3:6385/v1/upgrade/", - "rel": "self" - }, - { - "href": "http://10.10.10.3:6385/upgrade/", - "rel": "bookmark" - } - ], "datanetworks": [ { "href": "http://10.10.10.3:6385/v1/datanetworks/", diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/tests/v1/test_load.py b/sysinv/cgts-client/cgts-client/cgtsclient/tests/v1/test_load.py deleted file mode 100644 index d15ef6a195..0000000000 --- a/sysinv/cgts-client/cgts-client/cgtsclient/tests/v1/test_load.py +++ /dev/null @@ -1,132 +0,0 @@ -# -# Copyright (c) 2023 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -import testtools - -from cgtsclient.exc import InvalidAttribute -from cgtsclient.tests import utils -from cgtsclient.v1.load import Load -from cgtsclient.v1.load import LoadManager - - -class LoadManagerTest(testtools.TestCase): - def setUp(self): - super(LoadManagerTest, self).setUp() - - self.load = { - 'id': '1', - 'uuid': 'c0d71e4c-f327-45a7-8349-11821a9d44df', - 'state': 'IMPORTED', - 'software_version': '6.0', - 'compatible_version': '6.0', - 'required_patches': '', - } - fixtures = { - '/v1/loads/import_load': - { - 'POST': ( - {}, - self.load, - ), - }, - } - self.api = utils.FakeAPI(fixtures) - self.mgr = LoadManager(self.api) - - -class LoadImportTest(LoadManagerTest): - def setUp(self): - super(LoadImportTest, self).setUp() - - self.load_patch = { - 'path_to_iso': '/home/bootimage.iso', - 'path_to_sig': '/home/bootimage.sig', - 'inactive': False, - 'active': False, - 'local': False, - } - self.load_patch_request_body = { - 'path_to_iso': '/home/bootimage.iso', - 'path_to_sig': '/home/bootimage.sig', - } - - def test_load_import(self): - expected = [ - ( - 'POST', '/v1/loads/import_load', - {}, - self.load_patch_request_body, - {'active': 'false', 'inactive': 'false'}, - ) - ] - - load = self.mgr.import_load(**self.load_patch) - - self.assertEqual(self.api.calls, expected) - self.assertIsInstance(load, Load) - - def test_load_import_active(self): - self.load_patch['active'] = True - - expected = [ - ( - 'POST', '/v1/loads/import_load', - {}, - self.load_patch_request_body, - {'active': 'true', 'inactive': 'false'}, - ) - ] - - load = self.mgr.import_load(**self.load_patch) - - self.assertEqual(self.api.calls, expected) - self.assertIsInstance(load, Load) - - def test_load_import_local(self): - self.load_patch['local'] = True - self.load_patch_request_body['active'] = 'false' - self.load_patch_request_body['inactive'] = 'false' - - expected = [ - ( - 'POST', '/v1/loads/import_load', - {}, - self.load_patch_request_body, - ) - ] - - load = self.mgr.import_load(**self.load_patch) - - self.assertEqual(self.api.calls, expected) - self.assertIsInstance(load, Load) - - def test_load_import_inactive(self): - self.load_patch['inactive'] = True - - expected = [ - ( - 'POST', '/v1/loads/import_load', - {}, - self.load_patch_request_body, - {'active': 'false', 'inactive': 'true'} - ) - ] - - load = self.mgr.import_load(**self.load_patch) - - self.assertEqual(self.api.calls, expected) - self.assertIsInstance(load, Load) - - def test_load_import_invalid_attribute(self): - self.load_patch['foo'] = 'bar' - - self.assertRaises( - InvalidAttribute, - self.mgr.import_load, - 
**self.load_patch - ) - - self.assertEqual(self.api.calls, []) diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/tests/v1/test_load_shell.py b/sysinv/cgts-client/cgts-client/cgtsclient/tests/v1/test_load_shell.py deleted file mode 100644 index 5d79421093..0000000000 --- a/sysinv/cgts-client/cgts-client/cgtsclient/tests/v1/test_load_shell.py +++ /dev/null @@ -1,215 +0,0 @@ -# -# Copyright (c) 2023 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -from mock import patch - -from cgtsclient.exc import CommandError -from cgtsclient.tests import test_shell -from cgtsclient.v1.load import Load - - -class LoadImportShellTest(test_shell.ShellTest): - def setUp(self): - super(LoadImportShellTest, self).setUp() - - load_import = patch('cgtsclient.v1.load.LoadManager.import_load') - self.mock_load_import = load_import.start() - self.addCleanup(load_import.stop) - - load_show = patch('cgtsclient.v1.load_shell._print_load_show') - self.mock_load_show = load_show.start() - self.addCleanup(load_show.stop) - - load_list = patch('cgtsclient.v1.load.LoadManager.list') - self.mock_load_list = load_list.start() - self.addCleanup(load_list.stop) - - load_resource = { - 'software_version': '6.0', - 'compatible_version': '5.0', - 'required_patches': '', - } - self.load_resouce = Load( - manager=None, - info=load_resource, - loaded=True, - ) - - self.mock_load_import.return_value = self.load_resouce - self.mock_load_list.return_value = [] - self.mock_load_show.return_value = {} - - self.patch_expected = { - 'path_to_iso': '/home/bootimage.iso', - 'path_to_sig': '/home/bootimage.sig', - 'active': False, - 'local': False, - 'inactive': False, - } - - @patch('os.path.isfile', lambda x: True) - def test_load_import(self): - self.make_env() - - cmd = 'load-import /home/bootimage.iso /home/bootimage.sig' - self.shell(cmd) - - self.mock_load_import.assert_called_once() - self.mock_load_list.assert_called_once() - self.mock_load_show.assert_called_once() - - self.mock_load_import.assert_called_with(**self.patch_expected) - - @patch('os.path.abspath') - @patch('os.path.isfile', lambda x: True) - def test_load_import_relative_path(self, mock_abspath): - self.make_env() - - mock_abspath.side_effect = [ - '/home/bootimage.iso', - '/home/bootimage.sig', - ] - - cmd = 'load-import bootimage.iso bootimage.sig' - self.shell(cmd) - - self.mock_load_import.assert_called_once() - self.mock_load_list.assert_called_once() - self.mock_load_show.assert_called_once() - - self.mock_load_import.assert_called_with(**self.patch_expected) - - @patch('os.path.isfile', lambda x: True) - def test_load_import_active(self): - self.make_env() - - self.patch_expected['active'] = True - - cmd = ''' - load-import --active - /home/bootimage.iso - /home/bootimage.sig - ''' - self.shell(cmd) - - self.mock_load_import.assert_called_once() - self.mock_load_show.assert_called_once() - - self.mock_load_import.assert_called_with(**self.patch_expected) - - self.mock_load_list.assert_not_called() - - @patch('os.path.isfile', lambda x: True) - def test_load_import_active_short_form(self): - self.make_env() - - self.patch_expected['active'] = True - - cmd = ''' - load-import -a - /home/bootimage.iso - /home/bootimage.sig - ''' - self.shell(cmd) - - self.mock_load_import.assert_called_once() - self.mock_load_show.assert_called_once() - - self.mock_load_import.assert_called_with(**self.patch_expected) - - self.mock_load_list.assert_not_called() - - @patch('os.path.isfile', lambda x: True) - def test_load_import_local(self): - 
self.make_env() - - self.patch_expected['local'] = True - - cmd = ''' - load-import --local - /home/bootimage.iso - /home/bootimage.sig - ''' - self.shell(cmd) - - self.mock_load_import.assert_called_once() - self.mock_load_list.assert_called_once() - self.mock_load_show.assert_called_once() - - self.mock_load_import.assert_called_with(**self.patch_expected) - - @patch('os.path.isfile', lambda x: True) - def test_load_import_inactive(self): - self.make_env() - - self.patch_expected['inactive'] = True - - cmd = ''' - load-import --inactive - /home/bootimage.iso - /home/bootimage.sig - ''' - self.shell(cmd) - - self.mock_load_import.assert_called_once() - self.mock_load_show.assert_called_once() - self.mock_load_list.assert_not_called() - - self.mock_load_import.assert_called_with(**self.patch_expected) - - @patch('os.path.isfile', lambda x: True) - def test_load_import_inactive_short_form(self): - self.make_env() - - self.patch_expected['inactive'] = True - - cmd = ''' - load-import -i - /home/bootimage.iso - /home/bootimage.sig - ''' - self.shell(cmd) - - self.mock_load_import.assert_called_once() - self.mock_load_show.assert_called_once() - self.mock_load_list.assert_not_called() - - self.mock_load_import.assert_called_with(**self.patch_expected) - - @patch('os.path.isfile', lambda x: True) - def test_load_import_max_imported(self): - self.make_env() - - self.mock_load_list.return_value = [ - { - 'id': 1, - 'state': 'ACTIVE', - 'software_version': '5', - }, - { - 'id': 2, - 'state': 'IMPORTED', - 'software_version': '6', - }, - ] - - cmd = 'load-import bootimage.iso bootimage.sig' - self.assertRaises(CommandError, self.shell, cmd) - - self.mock_load_list.assert_called_once() - - self.mock_load_import.assert_not_called() - self.mock_load_show.assert_not_called() - - def test_load_import_invalid_path(self): - self.make_env() - - cmd = 'load-import bootimage.iso bootimage.sig' - self.assertRaises(CommandError, self.shell, cmd) - - self.mock_load_import.assert_not_called() - self.mock_load_list.assert_not_called() - self.mock_load_show.assert_not_called() diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/iHost_shell.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/iHost_shell.py index f65317a8b3..52dcd69ade 100755 --- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/iHost_shell.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/iHost_shell.py @@ -13,12 +13,10 @@ from collections import OrderedDict import os from cgtsclient._i18n import _ -from cgtsclient.common import constants from cgtsclient.common import utils from cgtsclient import exc from cgtsclient.v1 import ihost as ihost_utils from cgtsclient.v1 import istor as istor_utils -from six.moves import input def _print_ihost_show(ihost, columns=None, output_format=None): @@ -33,7 +31,7 @@ def _print_ihost_show(ihost, columns=None, output_format=None): 'config_target', 'config_status', 'location', 'uptime', 'reserved', 'created_at', 'updated_at', 'boot_device', 'rootfs_device', 'hw_settle', 'install_output', 'console', - 'tboot', 'vim_progress_status', 'software_load', + 'tboot', 'vim_progress_status', 'install_state', 'install_state_info', 'inv_state', 'clock_synchronization', 'device_image_update', 'reboot_needed', 'max_cpu_mhz_configured', @@ -117,16 +115,6 @@ def do_host_list(cc, args): output_format=args.format) -def do_host_upgrade_list(cc, args): - """List software upgrade info for hosts.""" - ihosts = cc.ihost.list() - field_labels = ['id', 'hostname', 'personality', - 'running_release', 'target_release'] - fields = ['id', 
'hostname', 'personality', - 'software_load', 'target_load'] - utils.print_list(ihosts, fields, field_labels, sortby=0) - - def do_kube_host_upgrade_list(cc, args): """List kubernetes upgrade info for hosts.""" @@ -566,66 +554,6 @@ def do_host_bulk_export(cc, args): return -@utils.arg('hostid', - metavar='', - help="Name or ID of host") -@utils.arg('-f', '--force', - action='store_true', - default=False, - help="Force the downgrade operation ") -def do_host_downgrade(cc, args): - """Perform software downgrade for the specified host.""" - ihost_utils._find_ihost(cc, args.hostid) - system_type, system_mode = utils._get_system_info(cc) - simplex = system_mode == constants.SYSTEM_MODE_SIMPLEX - - if simplex: - warning_message = ( - '\n' - 'WARNING: THIS OPERATION WILL COMPLETELY ERASE ALL DATA FROM THE ' - 'SYSTEM.\n' - 'Only proceed once the system data has been copied to another ' - 'system.\n' - 'Are you absolutely sure you want to continue? [yes/N]: ') - confirm = input(warning_message) - if confirm != 'yes': - print("Operation cancelled.") - return - - ihost = cc.ihost.downgrade(args.hostid, args.force) - _print_ihost_show(ihost) - - -@utils.arg('hostid', - metavar='', - help="Name or ID of host") -@utils.arg('-f', '--force', - action='store_true', - default=False, - help="Force the upgrade operation ") -def do_host_upgrade(cc, args): - """Perform software upgrade for a host.""" - ihost_utils._find_ihost(cc, args.hostid) - system_type, system_mode = utils._get_system_info(cc) - simplex = system_mode == constants.SYSTEM_MODE_SIMPLEX - - if simplex: - warning_message = ( - '\n' - 'WARNING: THIS OPERATION WILL COMPLETELY ERASE ALL DATA FROM THE ' - 'SYSTEM.\n' - 'Only proceed once the system data has been copied to another ' - 'system.\n' - 'Are you absolutely sure you want to continue? [yes/N]: ') - confirm = input(warning_message) - if confirm != 'yes': - print("Operation cancelled.") - return - - ihost = cc.ihost.upgrade(args.hostid, args.force) - _print_ihost_show(ihost) - - @utils.arg('hostid', metavar='', help="Name or ID of host") diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/ihost.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/ihost.py index 3bb2207729..de9b3282c7 100644 --- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/ihost.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/ihost.py @@ -73,20 +73,6 @@ class ihostManager(base.Manager): raise exc.InvalidAttribute() return self._create(self._path(), new) - def upgrade(self, hostid, force): - new = {} - new['force'] = force - resp, body = self.api.json_request( - 'POST', self._path(hostid) + "/upgrade", body=new) - return self.resource_class(self, body) - - def downgrade(self, hostid, force): - new = {} - new['force'] = force - resp, body = self.api.json_request( - 'POST', self._path(hostid) + "/downgrade", body=new) - return self.resource_class(self, body) - def create_many(self, body): return self._upload(self._path() + "/bulk_add", body) diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/load_shell.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/load_shell.py deleted file mode 100644 index 3c81681f34..0000000000 --- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/load_shell.py +++ /dev/null @@ -1,157 +0,0 @@ -# -# Copyright (c) 2015-2023 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# All Rights Reserved. 
-# - -from cgtsclient.common import utils -from cgtsclient import exc -import os.path -from oslo_utils._i18n import _ -import sys -import threading -import time - -IMPORTED_LOAD_MAX_COUNT = 1 - - -def _print_load_show(load): - fields = ['id', 'state', 'software_version', 'compatible_version', - 'required_patches'] - data = [(f, getattr(load, f, '')) for f in fields] - utils.print_tuple_list(data) - - -@utils.arg('loadid', - metavar='', - help="ID of load") -def do_load_show(cc, args): - """Show load attributes.""" - load = cc.load.get(args.loadid) - - _print_load_show(load) - - -def do_load_list(cc, args): - """List all loads.""" - loads = cc.load.list() - - field_labels = ['id', 'state', 'software_version'] - fields = ['id', 'state', 'software_version'] - utils.print_list(loads, fields, field_labels, sortby=0) - - -@utils.arg('loadid', - metavar='', - help="ID of load") -def do_load_delete(cc, args): - """Delete a load.""" - - load = cc.load.get(args.loadid) - - try: - cc.load.delete(load.uuid) - except exc.HTTPNotFound: - raise exc.CommandError('Delete load failed: load %s' % args.loadid) - - print('Deleted load: load %s' % args.loadid) - - -@utils.arg('isopath', - metavar='', - help="The full path of the iso to import [REQUIRED]") -@utils.arg('sigpath', - metavar='', - help="The full path of the detached signature file corresponding to the iso [REQUIRED]") -@utils.arg('-a', '--active', - action='store_true', - default=False, - help=("Perform an active load import operation. " - "Applicable only for SystemController to allow import of " - "an active load for subcloud install")) -@utils.arg('-i', '--inactive', - action='store_true', - default=False, - help=("Perform an inactive load import operation. " - "Import a previous release load for subcloud install")) -@utils.arg('--local', - action='store_true', - default=False, - help=("Import the load locally from the active controller. " - "To use this option, first upload the .iso and .sig files to " - "the active controller and then specify the absolute path of " - "both files as 'isopath' and 'sigpath'")) -def do_load_import(cc, args): - """Import a load.""" - - local = args.local - active = args.active - inactive = args.inactive - - # If absolute path is not specified, we assume it is the relative path. - # args.isopath will then be set to the absolute path - if not os.path.isabs(args.isopath): - args.isopath = os.path.abspath(args.isopath) - - if not os.path.isabs(args.sigpath): - args.sigpath = os.path.abspath(args.sigpath) - - if not os.path.isfile(args.isopath): - raise exc.CommandError(_("File %s does not exist." % args.isopath)) - - if not os.path.isfile(args.sigpath): - raise exc.CommandError(_("File %s does not exist." % args.sigpath)) - - if not active and not inactive: - # The following logic is taken from sysinv api as it takes a while for - # this large POST request to reach the server. - # - # Ensure the request does not exceed load import limit before sending. - - loads = cc.load.list() - if len(loads) > IMPORTED_LOAD_MAX_COUNT: - raise exc.CommandError(_( - "Max number of loads (2) reached. Please remove the " - "old or unused load before importing a new one.")) - - patch = { - 'path_to_iso': args.isopath, - 'path_to_sig': args.sigpath, - 'inactive': inactive, - 'active': active, - 'local': local, - } - - try: - print("This operation will take a while. 
Please wait.") - wait_task = WaitThread() - wait_task.start() - imported_load = cc.load.import_load(**patch) - wait_task.join() - except Exception as e: - wait_task.join() - raise exc.CommandError(_("Load import failed. Reason: %s" % e)) - else: - _print_load_show(imported_load) - - -class WaitThread(threading.Thread): - def __init__(self): - super(WaitThread, self).__init__() - self.stop = threading.Event() - - def run(self): - while not self.stop.is_set(): - sys.stdout.write(".") - sys.stdout.flush() - time.sleep(10) - - def join(self, timeout=None): # pylint: disable=arguments-differ - self.stop.set() - super(WaitThread, self).join(timeout) - sys.stdout.write("\n") - sys.stdout.flush() diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/shell.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/shell.py index 930b80ed22..e74939a98c 100644 --- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/shell.py +++ b/sysinv/cgts-client/cgts-client/cgtsclient/v1/shell.py @@ -50,7 +50,6 @@ from cgtsclient.v1 import label_shell from cgtsclient.v1 import license_shell from cgtsclient.v1 import lldp_agent_shell from cgtsclient.v1 import lldp_neighbour_shell -from cgtsclient.v1 import load_shell from cgtsclient.v1 import network_addrpool_shell from cgtsclient.v1 import network_shell from cgtsclient.v1 import partition_shell @@ -72,7 +71,6 @@ from cgtsclient.v1 import sm_service_shell from cgtsclient.v1 import sm_servicegroup_shell from cgtsclient.v1 import storage_backend_shell from cgtsclient.v1 import storage_tier_shell -from cgtsclient.v1 import upgrade_shell COMMAND_MODULES = [ @@ -108,9 +106,7 @@ COMMAND_MODULES = [ route_shell, isensor_shell, isensorgroup_shell, - load_shell, pci_device_shell, - upgrade_shell, network_shell, network_addrpool_shell, interface_network_shell, diff --git a/sysinv/cgts-client/cgts-client/cgtsclient/v1/upgrade_shell.py b/sysinv/cgts-client/cgts-client/cgtsclient/v1/upgrade_shell.py deleted file mode 100755 index fdd326e082..0000000000 --- a/sysinv/cgts-client/cgts-client/cgtsclient/v1/upgrade_shell.py +++ /dev/null @@ -1,173 +0,0 @@ -# -# Copyright (c) 2015-2016 Wind River Systems, Inc. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# All Rights Reserved. -# - -from cgtsclient.common import constants -from cgtsclient.common import utils -from cgtsclient import exc -from six.moves import input - - -def _print_upgrade_show(obj): - fields = ['uuid', 'state', 'from_release', 'to_release'] - data = [(f, getattr(obj, f, '')) for f in fields] - utils.print_tuple_list(data) - - -def do_upgrade_show(cc, args): - """Show software upgrade details and attributes.""" - - upgrades = cc.upgrade.list() - if upgrades: - _print_upgrade_show(upgrades[0]) - else: - print('No upgrade in progress') - - -@utils.arg('-f', '--force', - action='store_true', - default=False, - help="Ignore non management-affecting alarms") -def do_upgrade_start(cc, args): - """Start a software upgrade. 
""" - - upgrade = cc.upgrade.create(args.force) - uuid = getattr(upgrade, 'uuid', '') - try: - upgrade = cc.upgrade.get(uuid) - except exc.HTTPNotFound: - raise exc.CommandError('Created upgrade UUID not found: %s' % uuid) - _print_upgrade_show(upgrade) - - -def do_upgrade_activate(cc, args): - """Activate a software upgrade.""" - - data = dict() - data['state'] = constants.UPGRADE_ACTIVATION_REQUESTED - - patch = [] - for (k, v) in data.items(): - patch.append({'op': 'replace', 'path': '/' + k, 'value': v}) - try: - upgrade = cc.upgrade.update(patch) - except exc.HTTPNotFound: - raise exc.CommandError('Upgrade UUID not found') - _print_upgrade_show(upgrade) - - -def do_upgrade_abort(cc, args): - """Abort a software upgrade.""" - try: - body = cc.upgrade.check_reinstall() - except Exception: - raise exc.CommandError('Error getting upgrade state') - - reinstall_necessary = body.get('reinstall_necessary', None) - - abort_required = False - system_type, system_mode = utils._get_system_info(cc) - - is_cpe = system_type == constants.TS_AIO - simplex = system_mode == constants.SYSTEM_MODE_SIMPLEX - if simplex: - if reinstall_necessary: - warning_message = ( - '\n' - 'WARNING: THIS OPERATION WILL RESULT IN A COMPLETE SYSTEM ' - 'OUTAGE.\n' - 'It will require this host to be reinstalled and the system ' - 'restored with the previous version. ' - 'The system will be restored to when the upgrade was started.' - '\n\n' - 'Are you absolutely sure you want to continue? [yes/N]: ') - abort_required = True - else: - warning_message = ( - '\n' - 'WARNING: This will stop the upgrade process. The system ' - 'backup created during the upgrade-start will be removed.\n\n' - 'Continue [yes/N]: ') - elif reinstall_necessary: - warning_message = ( - '\n' - 'WARNING: THIS OPERATION WILL RESULT IN A COMPLETE SYSTEM ' - 'OUTAGE.\n' - 'It will require every host in the system to be powered down and ' - 'then reinstalled to recover. All instances will be lost, ' - 'including their disks. You will only be able to recover ' - 'instances if you have external backups for their data.\n' - 'This operation should be done as a last resort, if there is ' - 'absolutely no other way to recover the system.\n\n' - 'Are you absolutely sure you want to continue? [yes/N]: ') - abort_required = True - else: - if is_cpe: - warning_message = ( - '\n' - 'WARNING: THIS OPERATION WILL IMPACT RUNNING INSTANCES.\n' - 'Any instances that have been migrated after the upgrade was ' - 'started will be lost, including their disks. You will only ' - 'be able to recover instances if you have external backups ' - 'for their data.\n' - 'This operation should be done as a last resort, if there is ' - 'absolutely no other way to recover the system.\n\n' - 'Are you absolutely sure you want to continue? [yes/N]: ') - abort_required = True - else: - warning_message = ( - '\n' - 'WARNING: By continuing this operation, you will be forced to ' - 'downgrade any hosts that have been upgraded. 
The system will ' - 'revert to the state when controller-0 was last active.\n\n' - 'Continue [yes/N]: ') - - confirm = input(warning_message) - if confirm != 'yes': - print("Operation cancelled.") - return - elif abort_required: - confirm = input("Type 'abort' to confirm: ") - if confirm != 'abort': - print("Operation cancelled.") - return - - data = dict() - data['state'] = constants.UPGRADE_ABORTING - - patch = [] - for (k, v) in data.items(): - patch.append({'op': 'replace', 'path': '/' + k, 'value': v}) - try: - upgrade = cc.upgrade.update(patch) - except exc.HTTPNotFound: - raise exc.CommandError('Upgrade UUID not found') - _print_upgrade_show(upgrade) - - -def do_upgrade_complete(cc, args): - """Complete a software upgrade.""" - - try: - upgrade = cc.upgrade.delete() - except exc.HTTPNotFound: - raise exc.CommandError('Upgrade not found') - - _print_upgrade_show(upgrade) - - -def do_upgrade_abort_complete(cc, args): - """Complete a software upgrade.""" - - try: - upgrade = cc.upgrade.delete() - except exc.HTTPNotFound: - raise exc.CommandError('Upgrade not found') - - _print_upgrade_show(upgrade) diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py index 3ba123a6dd..8649be35c5 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/host.py @@ -2834,173 +2834,14 @@ class HostController(rest.RestController): def upgrade(self, uuid, body): """Upgrade the host to the specified load""" - # There must be an upgrade in progress - try: - upgrade = pecan.request.dbapi.software_upgrade_get_one() - except exception.NotFound: - raise wsme.exc.ClientSideError(_( - "host-upgrade rejected: An upgrade is not in progress.")) - - if upgrade.state in [constants.UPGRADE_ABORTING_ROLLBACK, - constants.UPGRADE_ABORTING]: - raise wsme.exc.ClientSideError(_( - "host-upgrade rejected: Aborting Upgrade.")) - - # Enforce upgrade order - loads = pecan.request.dbapi.load_get_list() - new_target_load = cutils.get_imported_load(loads) - rpc_ihost = objects.host.get_by_uuid(pecan.request.context, uuid) - - if rpc_ihost.personality == constants.EDGEWORKER: - raise wsme.exc.ClientSideError(_( - "host-upgrade rejected: Not supported for EDGEWORKER node.")) - - simplex = (utils.get_system_mode() == constants.SYSTEM_MODE_SIMPLEX) - # If this is a simplex system skip this check, there's no other nodes - if simplex: - pass - elif rpc_ihost.personality == constants.WORKER: - self._check_personality_load(constants.CONTROLLER, new_target_load) - self._check_personality_load(constants.STORAGE, new_target_load) - elif rpc_ihost.personality == constants.STORAGE: - self._check_personality_load(constants.CONTROLLER, new_target_load) - # Ensure we upgrade storage-0 before other storage nodes - if rpc_ihost.hostname != constants.STORAGE_0_HOSTNAME: - self._check_host_load(constants.STORAGE_0_HOSTNAME, - new_target_load) - elif rpc_ihost.hostname == constants.CONTROLLER_0_HOSTNAME: - self._check_host_load(constants.CONTROLLER_1_HOSTNAME, - new_target_load) - - # Check upgrade state - if rpc_ihost.hostname == constants.CONTROLLER_1_HOSTNAME or simplex: - if upgrade.state != constants.UPGRADE_STARTED: - raise wsme.exc.ClientSideError(_( - "host-upgrade rejected: Upgrade not in %s state." 
% - constants.UPGRADE_STARTED)) - elif rpc_ihost.hostname == constants.CONTROLLER_0_HOSTNAME: - if upgrade.state != constants.UPGRADE_UPGRADING_CONTROLLERS: - raise wsme.exc.ClientSideError(_( - "host-upgrade rejected: Upgrade not in %s state." % - constants.UPGRADE_UPGRADING_CONTROLLERS)) - elif upgrade.state != constants.UPGRADE_UPGRADING_HOSTS: - raise wsme.exc.ClientSideError(_( - "host-upgrade rejected: Upgrade not in %s state." % - constants.UPGRADE_UPGRADING_HOSTS)) - - if rpc_ihost.personality == constants.STORAGE: - osd_status = self._ceph.check_osds_down_up(rpc_ihost.hostname, True) - if not osd_status: - raise wsme.exc.ClientSideError( - _("Host %s must be locked and " - "all osds must be down.") - % (rpc_ihost.hostname)) - - # Update the target load for this host - self._update_load(uuid, body, new_target_load) - - if rpc_ihost.hostname == constants.CONTROLLER_1_HOSTNAME: - # When controller-1 is upgraded, we do the data migration - upgrade_update = {'state': constants.UPGRADE_DATA_MIGRATION} - pecan.request.dbapi.software_upgrade_update(upgrade.uuid, - upgrade_update) - - # Set upgrade flag so controller-1 will upgrade after install - # This flag is guaranteed to be written on controller-0, since - # controller-1 must be locked to run the host-upgrade command. - # perform rpc to conductor to do the update with root privilege access - pecan.request.rpcapi.update_controller_upgrade_flag(pecan.request.context) - - pecan.request.dbapi.ihost_update(uuid, - {'invprovision': constants.UPGRADING}) - return Host.convert_with_links(rpc_ihost) + raise NotImplementedError("This API is deprecated.") @cutils.synchronized(LOCK_NAME) @wsme_pecan.wsexpose(Host, six.text_type, body=six.text_type) def downgrade(self, uuid, body): """Downgrade the host to the specified load""" - # There must be an upgrade in progress - try: - upgrade = pecan.request.dbapi.software_upgrade_get_one() - except exception.NotFound: - raise wsme.exc.ClientSideError(_( - "host-downgrade rejected: An upgrade is not in progress.")) - - loads = pecan.request.dbapi.load_get_list() - new_target_load = cutils.get_active_load(loads) - rpc_ihost = objects.host.get_by_uuid(pecan.request.context, uuid) - - if rpc_ihost.personality == constants.EDGEWORKER: - raise wsme.exc.ClientSideError(_( - "host-downgrade rejected: Not supported for EDGEWORKER node.")) - - simplex = (utils.get_system_mode() == constants.SYSTEM_MODE_SIMPLEX) - - # If this is a simplex upgrade just check that we are aborting - if simplex: - if upgrade.state not in [constants.UPGRADE_ABORTING_ROLLBACK, - constants.UPGRADE_ABORTING]: - raise wsme.exc.ClientSideError( - _("host-downgrade rejected: The upgrade must be aborted " - "before downgrading.")) - # Check if we're doing a rollback - elif upgrade.state == constants.UPGRADE_ABORTING_ROLLBACK: - if rpc_ihost.hostname == constants.CONTROLLER_0_HOSTNAME: - # Before we downgrade controller-0 during a rollback/reinstall - # we check that all other worker/storage nodes are locked and - # offline. We also set a flag on controller-1 to indicate we - # are in a rollback. When controller-0 comes up it will check - # for this flag and update its database as necessary. 
- self._semantic_check_rollback() - - # the upgrade rollback flag can only be created by root so - # send an rpc request to sysinv-conductor to create the flag - pecan.request.rpcapi.update_controller_rollback_flag( - pecan.request.context) - elif rpc_ihost.hostname == constants.CONTROLLER_1_HOSTNAME: - self._check_host_load(constants.CONTROLLER_0_HOSTNAME, - new_target_load) - else: - raise wsme.exc.ClientSideError(_( - "host-downgrade rejected: Rollback is in progress.")) - else: - # Enforce downgrade order - if rpc_ihost.personality == constants.CONTROLLER: - self._check_personality_load(constants.WORKER, - new_target_load) - self._check_personality_load(constants.STORAGE, - new_target_load) - if rpc_ihost.hostname == constants.CONTROLLER_1_HOSTNAME: - self._check_host_load(constants.CONTROLLER_0_HOSTNAME, - new_target_load) - elif rpc_ihost.personality == constants.STORAGE: - self._check_personality_load(constants.WORKER, - new_target_load) - if rpc_ihost.hostname == constants.STORAGE_0_HOSTNAME: - self._check_storage_downgrade(new_target_load) - # else we should be a worker node, no need to check other nodes - - # Check upgrade state - if rpc_ihost.hostname in [constants.CONTROLLER_0_HOSTNAME, - constants.CONTROLLER_1_HOSTNAME]: - # The controllers are the last nodes to be downgraded. - # There is no way to continue the upgrade after that, - # so force the user to specifically abort the upgrade - # before doing this. - if upgrade.state != constants.UPGRADE_ABORTING: - raise wsme.exc.ClientSideError(_( - "host-downgrade rejected: Upgrade not in %s state." % - constants.UPGRADE_ABORTING)) - - # Remove the host manifest. This is similar to the process taken - # during host-reinstall. The manifest needs to be removed to prevent - # the host from running kubeadm prematurely. 
- pecan.request.rpcapi.remove_host_config(pecan.request.context, uuid) - - self._update_load(uuid, body, new_target_load) - - return Host.convert_with_links(rpc_ihost) + raise NotImplementedError("This API is deprecated.") def _semantic_check_rollback(self): hosts = pecan.request.dbapi.ihost_get_list() diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/load.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/load.py index 472a485844..8a451d3760 100644 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/load.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/load.py @@ -20,7 +20,6 @@ import json -import jsonpatch import os import pecan from pecan import rest @@ -28,7 +27,6 @@ import psutil import six import shutil import socket -import sys import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan @@ -43,14 +41,12 @@ from sysinv.api.controllers.v1 import collection from sysinv.api.controllers.v1 import link from sysinv.api.controllers.v1 import types from sysinv.api.controllers.v1 import utils -from sysinv.cert_mon import utils as cert_utils from sysinv.common import constants from sysinv.common import exception from sysinv.common import utils as cutils from sysinv import objects from sysinv.openstack.common import rpc from sysinv.openstack.common.rpc import common -import tsconfig.tsconfig as tsc LOG = log.getLogger(__name__) @@ -185,31 +181,20 @@ class LoadController(rest.RestController): wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of loads.""" - - return self._get_loads_collection(marker, limit, sort_key, sort_dir) + raise NotImplementedError("This API is deprecated.") @wsme_pecan.wsexpose(LoadCollection, types.uuid, int, wtypes.text, wtypes.text) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of loads with detail.""" - parent = pecan.request.path.split('/')[:-1][-1] - if parent != "loads": - raise exception.HTTPNotFound - - expand = True - resource_url = '/'.join(['loads', 'detail']) - return self._get_loads_collection(marker, limit, sort_key, sort_dir, - expand, resource_url) + raise NotImplementedError("This API is deprecated.") @wsme_pecan.wsexpose(Load, six.text_type) def get_one(self, load_uuid): """Retrieve information about the given Load.""" - rpc_load = objects.load.get_by_uuid( - pecan.request.context, load_uuid) - - return Load.convert_with_links(rpc_load) + raise NotImplementedError("This API is deprecated.") @staticmethod def _new_load_semantic_checks(load): @@ -227,6 +212,7 @@ class LoadController(rest.RestController): # This method is only used to populate the inital load for the system # This is invoked during config_controller # Loads after the first are added via import + # TODO(ShawnLi): This will be removed when we remove the Load table loads = pecan.request.dbapi.load_get_list() if loads: @@ -315,14 +301,7 @@ class LoadController(rest.RestController): @cutils.synchronized(LOCK_NAME) def import_load(self): """Import a load from iso/sig files""" - try: - return self._import_load() - except Exception as e: - # Duplicate the exception handling behavior of the wsmeext.pecan wsexpose decorator - # This can be moved to a decorator if we need to reuse this in other modules - exception_code = getattr(e, 'code', None) - pecan.response.status = exception_code if wsme.utils.is_valid_code(exception_code) else 500 - return wsme.api.format_exception(sys.exc_info()) + raise NotImplementedError("This API is deprecated.") def 
_import_load(self): """Create a new load from iso/sig files""" @@ -446,36 +425,7 @@ class LoadController(rest.RestController): def import_load_metadata(self, load): """Import a new load using only the metadata. Only available to SX subcoulds.""" - LOG.info("Load import metadata request received.") - err_msg = None - - # Enforce system type restrictions - err_msg = _("Metadata load import is only available to simplex subclouds.") - if utils.get_system_mode() != constants.SYSTEM_MODE_SIMPLEX: - raise wsme.exc.ClientSideError(err_msg) - if utils.get_distributed_cloud_role() != constants.DISTRIBUTED_CLOUD_ROLE_SUBCLOUD: - raise wsme.exc.ClientSideError(err_msg) - - self._check_existing_loads() - - if load.software_version == load.compatible_version: - raise wsme.exc.ClientSideError(_("Invalid load software_version.")) - if load.compatible_version != tsc.SW_VERSION: - raise wsme.exc.ClientSideError(_("Load compatible_version does not match SW_VERSION.")) - - patch = load.as_dict() - self._new_load_semantic_checks(patch) - patch['state'] = constants.IMPORTED_METADATA_LOAD_STATE - patch['uuid'] = None - - LOG.info("Load import metadata validated, creating new load: %s" % patch) - try: - new_load = pecan.request.dbapi.load_create(patch) - except exception.SysinvException: - LOG.exception("Failure to create load") - raise wsme.exc.ClientSideError(_("Failure to create load")) - - return load.convert_with_links(new_load) + raise NotImplementedError("This API is deprecated.") def _check_existing_loads(self, import_type=None): # Only are allowed at one time: @@ -529,86 +479,11 @@ class LoadController(rest.RestController): def patch(self, load_id, patch): """Update an existing load.""" - # TODO (dsulliva) - # This is a stub. We will need to place reasonable limits on what can - # be patched as we add to the upgrade system. This portion of the API - # likely will not be publicly accessible. 
- rpc_load = objects.load.get_by_uuid(pecan.request.context, load_id) - - utils.validate_patch(patch) - patch_obj = jsonpatch.JsonPatch(patch) - - try: - load = Load(**jsonpatch.apply_patch(rpc_load.as_dict(), patch_obj)) - - except utils.JSONPATCH_EXCEPTIONS as e: - raise exception.PatchError(patch=patch, reason=e) - - fields = objects.load.fields - - for field in fields: - if rpc_load[field] != getattr(load, field): - rpc_load[field] = getattr(load, field) - - rpc_load.save() - - return Load.convert_with_links(rpc_load) + raise NotImplementedError("This API is deprecated.") @cutils.synchronized(LOCK_NAME) @wsme_pecan.wsexpose(Load, six.text_type, status_code=200) def delete(self, load_id): """Delete a load.""" - load = pecan.request.dbapi.load_get(load_id) - - # make sure the load isn't in use by an upgrade - try: - # NOTE(bqian) load relates only to the legacy upgrade - upgrade = pecan.request.dbapi.software_upgrade_get_one() - except exception.NotFound: - pass - else: - if load.id == upgrade.to_load or load.id == upgrade.from_load: - raise wsme.exc.ClientSideError( - _("Unable to delete load, load in use by upgrade")) - - # make sure the load isn't used by any hosts - hosts = pecan.request.dbapi.host_upgrade_get_list() - for host in hosts: - if host.target_load == load.id or host.software_load == load.id: - raise wsme.exc.ClientSideError(_( - "Unable to delete load, load in use by host (id: %s)") - % host.forihostid) - - # make sure there are no subclouds with current load different from central - # cloud current load - system = pecan.request.dbapi.isystem_get_one() - if load.state == constants.IMPORTED_LOAD_STATE and \ - system.distributed_cloud_role == constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER: - subclouds = [] - - try: - # TODO move the cert_utils to a more common place - cert_utils.init_keystone_auth_opts() - token_cache = cert_utils.TokenCache(constants.OS_INTERFACE_INTERNAL) - all_subclouds = cert_utils.get_subclouds_from_dcmanager(token_cache.get_token()) - for sc in all_subclouds: - subcloud = cert_utils.get_subcloud(token_cache.get_token(), sc['name']) - if subcloud['management-state'] == cert_utils.MANAGEMENT_MANAGED \ - and subcloud['software-version'] == load.software_version: - subclouds.append(sc['name']) - - except Exception as err: - LOG.error("Unexpected error to get subclouds software version: %s", err) - raise wsme.exc.ClientSideError(_("Failed to detect if the load can be safely deleted.")) - - if subclouds: - raise wsme.exc.ClientSideError(_( - "Unable to delete load, %i subclouds are not upgraded yet. 
" - "Some of these include %s") % (len(subclouds), subclouds[:10])) - - cutils.validate_load_for_delete(load) - - pecan.request.rpcapi.delete_load(pecan.request.context, load_id) - - return Load.convert_with_links(load) + raise NotImplementedError("This API is deprecated.") diff --git a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/upgrade.py b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/upgrade.py index 04dbb9588f..81764c68c8 100755 --- a/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/upgrade.py +++ b/sysinv/sysinv/sysinv/sysinv/api/controllers/v1/upgrade.py @@ -8,25 +8,20 @@ # coding=utf-8 # -import os import pecan from pecan import rest from pecan import expose import six -import socket import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan -import tsconfig.tsconfig as tsc from oslo_log import log -from sysinv._i18n import _ from sysinv.api.controllers.v1 import base from sysinv.api.controllers.v1 import collection from sysinv.api.controllers.v1 import link from sysinv.api.controllers.v1 import types from sysinv.api.controllers.v1 import utils -from sysinv.api.controllers.v1 import vim_api from sysinv.common import exception from sysinv.common import utils as cutils from sysinv.common import constants @@ -171,332 +166,48 @@ class UpgradeController(rest.RestController): @expose('json') def check_reinstall(self): - reinstall_necessary = False - try: - upgrade = pecan.request.dbapi.software_upgrade_get_one() - except exception.NotFound: - pass - else: - controller_0 = pecan.request.dbapi.ihost_get_by_hostname( - constants.CONTROLLER_0_HOSTNAME) - host_upgrade = pecan.request.dbapi.host_upgrade_get_by_host( - controller_0.id) - - if host_upgrade.target_load == upgrade.to_load or \ - host_upgrade.software_load == upgrade.to_load: - reinstall_necessary = True - - return {'reinstall_necessary': reinstall_necessary} + raise NotImplementedError("This API is deprecated.") @expose('json') def get_upgrade_msg(self): - output = '' - try: - with open(ERROR_FILE, 'r') as error_file: - output = error_file.read() - except Exception: - LOG.warning("Error opening file %s" % ERROR_FILE) - - return output + raise NotImplementedError("This API is deprecated.") @wsme_pecan.wsexpose(UpgradeCollection, types.uuid, int, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of upgrades.""" - return self._get_upgrade_collection(marker, limit, sort_key, sort_dir) + raise NotImplementedError("This API is deprecated.") @wsme_pecan.wsexpose(Upgrade, types.uuid) def get_one(self, uuid): """Retrieve information about the given upgrade.""" - rpc_upgrade = objects.software_upgrade.get_by_uuid( - pecan.request.context, uuid) - return Upgrade.convert_with_links(rpc_upgrade) + raise NotImplementedError("This API is deprecated.") @cutils.synchronized(LOCK_NAME) @wsme_pecan.wsexpose(Upgrade, body=six.text_type) def post(self, body): """Create a new Software Upgrade instance and start upgrade.""" - # Only start the upgrade from controller-0 - if socket.gethostname() != constants.CONTROLLER_0_HOSTNAME: - raise wsme.exc.ClientSideError(_( - "upgrade-start rejected: An upgrade can only be started " - "when %s is active." 
% constants.CONTROLLER_0_HOSTNAME)) - - # There must not be a kubernetes upgrade in progress - try: - pecan.request.dbapi.kube_upgrade_get_one() - except exception.NotFound: - pass - else: - raise wsme.exc.ClientSideError(_( - "upgrade-start rejected: A platform upgrade cannot be done " - "while a kubernetes upgrade is in progress.")) - - # There must not already be an upgrade in progress - try: - pecan.request.dbapi.software_upgrade_get_one() - except exception.NotFound: - pass - else: - raise wsme.exc.ClientSideError(_( - "upgrade-start rejected: An upgrade is already in progress.")) - - # Determine the from_load and to_load - loads = pecan.request.dbapi.load_get_list() - from_load = cutils.get_active_load(loads) - from_version = from_load.software_version - to_load = cutils.get_imported_load(loads) - to_version = to_load.software_version - - controller_0 = pecan.request.dbapi.ihost_get_by_hostname( - constants.CONTROLLER_0_HOSTNAME) - - force = body.get('force', False) is True - - try: - # Set the upgrade flag in VIM - # This prevents VM changes during the upgrade and health checks - if utils.get_system_mode() != constants.SYSTEM_MODE_SIMPLEX: - vim_api.set_vim_upgrade_state(controller_0, True) - except Exception as e: - LOG.exception(e) - raise wsme.exc.ClientSideError(_( - "upgrade-start rejected: Unable to set VIM upgrade state")) - - success, output = pecan.request.rpcapi.get_system_health( - pecan.request.context, force=force, upgrade=True) - - if not success: - LOG.info("Health audit failure during upgrade start. Health " - "query results: %s" % output) - if os.path.exists(constants.SYSINV_RUNNING_IN_LAB) and force: - LOG.info("Running in lab, ignoring health errors.") - else: - vim_api.set_vim_upgrade_state(controller_0, False) - raise wsme.exc.ClientSideError(_( - "upgrade-start rejected: System is not in a valid state " - "for upgrades. Run system health-query-upgrade for more " - "details.")) - - # Create upgrade record. Must do this before the prepare_upgrade so - # the upgrade record exists when the database is dumped. 
- create_values = {'from_load': from_load.id, - 'to_load': to_load.id, - 'state': constants.UPGRADE_STARTING} - new_upgrade = None - try: - new_upgrade = pecan.request.dbapi.software_upgrade_create( - create_values) - except Exception as ex: - vim_api.set_vim_upgrade_state(controller_0, False) - LOG.exception(ex) - raise - - # Prepare for upgrade - LOG.info("Starting upgrade from release: %s to release: %s" % - (from_version, to_version)) - - try: - pecan.request.rpcapi.start_upgrade(pecan.request.context, - new_upgrade) - except Exception as ex: - vim_api.set_vim_upgrade_state(controller_0, False) - pecan.request.dbapi.software_upgrade_destroy(new_upgrade.uuid) - LOG.exception(ex) - raise - - return Upgrade.convert_with_links(new_upgrade) + raise NotImplementedError("This API is deprecated.") @cutils.synchronized(LOCK_NAME) @wsme.validate([UpgradePatchType]) @wsme_pecan.wsexpose(Upgrade, body=[UpgradePatchType]) def patch(self, patch): """Updates attributes of Software Upgrade.""" - updates = self._get_updates(patch) - - # Get the current upgrade - try: - upgrade = pecan.request.dbapi.software_upgrade_get_one() - except exception.NotFound: - raise wsme.exc.ClientSideError(_( - "operation rejected: An upgrade is not in progress.")) - - to_load = pecan.request.dbapi.load_get(upgrade.to_load) - to_version = to_load.software_version - - if updates['state'] == constants.UPGRADE_ABORTING: - # Make sure upgrade wasn't already aborted - if upgrade.state in [constants.UPGRADE_ABORTING, - constants.UPGRADE_ABORTING_ROLLBACK]: - raise wsme.exc.ClientSideError(_( - "upgrade-abort rejected: Upgrade already aborted ")) - - # Abort the upgrade - rpc_upgrade = pecan.request.rpcapi.abort_upgrade( - pecan.request.context, upgrade) - - return Upgrade.convert_with_links(rpc_upgrade) - - # if an activation is requested, make sure we are not already in - # activating state or have already activated - elif updates['state'] == constants.UPGRADE_ACTIVATION_REQUESTED: - - # if a restore is in progress, we need to restart the - # upgrade process for non simplex systems - if tsc.system_mode != constants.SYSTEM_MODE_SIMPLEX: - if self.check_restore_in_progress(): - raise wsme.exc.ClientSideError(_( - "upgrade-activate rejected: A restore was in progress before" - " upgrade was started. 
Complete the restore of the" - " previous release before reattempting upgrade.")) - - if upgrade.state in [constants.UPGRADE_ACTIVATING, - constants.UPGRADE_ACTIVATING_HOSTS, - constants.UPGRADE_ACTIVATION_COMPLETE]: - raise wsme.exc.ClientSideError(_( - "upgrade-activate rejected: " - "Upgrade already activating or activated.")) - - # All hosts must be unlocked and enabled, - # and running the new release - hosts = cutils.get_upgradable_hosts(pecan.request.dbapi) - for host in hosts: - if host['administrative'] != constants.ADMIN_UNLOCKED or \ - host['operational'] != constants.OPERATIONAL_ENABLED: - raise wsme.exc.ClientSideError(_( - "upgrade-activate rejected: All hosts must be unlocked" - " and enabled before the upgrade can be activated.")) - for host in hosts: - host_upgrade = objects.host_upgrade.get_by_host_id( - pecan.request.context, host.id) - if (host_upgrade.target_load != to_load.id or - host_upgrade.software_load != to_load.id): - raise wsme.exc.ClientSideError(_( - "upgrade-activate rejected: All hosts must be " - "upgraded before the upgrade can be activated.")) - - # we need to make sure the state is updated before calling the rpc - rpc_upgrade = pecan.request.dbapi.software_upgrade_update( - upgrade.uuid, updates) - pecan.request.rpcapi.activate_upgrade(pecan.request.context, - upgrade) - - # make sure the to/from loads are in the correct state - pecan.request.dbapi.set_upgrade_loads_state( - upgrade, - constants.ACTIVE_LOAD_STATE, - constants.IMPORTED_LOAD_STATE) - - LOG.info("Setting SW_VERSION to release: %s" % to_version) - system = pecan.request.dbapi.isystem_get_one() - pecan.request.dbapi.isystem_update( - system.uuid, {'software_version': to_version}) - - return Upgrade.convert_with_links(rpc_upgrade) + raise NotImplementedError("This API is deprecated.") @cutils.synchronized(LOCK_NAME) @wsme_pecan.wsexpose(Upgrade) def delete(self): """Complete upgrade and delete Software Upgrade instance.""" - # There must be an upgrade in progress - try: - upgrade = pecan.request.dbapi.software_upgrade_get_one() - except exception.NotFound: - raise wsme.exc.ClientSideError(_( - "upgrade-complete rejected: An upgrade is not in progress.")) - - # Only complete the upgrade from controller-0. This is to ensure that - # we can clean up all the upgrades related files, some of which are - # local to controller-0. - if socket.gethostname() != constants.CONTROLLER_0_HOSTNAME: - raise wsme.exc.ClientSideError(_( - "upgrade-complete rejected: An upgrade can only be completed " - "when %s is active." 
% constants.CONTROLLER_0_HOSTNAME)) - - from_load = pecan.request.dbapi.load_get(upgrade.from_load) - - if upgrade.state == constants.UPGRADE_ACTIVATION_COMPLETE: - # Complete the upgrade - current_abort_state = upgrade.state - upgrade = pecan.request.dbapi.software_upgrade_update( - upgrade.uuid, {'state': constants.UPGRADE_COMPLETING}) - try: - pecan.request.rpcapi.complete_upgrade( - pecan.request.context, upgrade, current_abort_state) - except Exception as ex: - LOG.exception(ex) - pecan.request.dbapi.software_upgrade_update( - upgrade.uuid, - {'state': constants.UPGRADE_ACTIVATION_COMPLETE}) - raise - - elif upgrade.state in [constants.UPGRADE_ABORTING, - constants.UPGRADE_ABORTING_ROLLBACK]: - # All upgradable hosts must be running the old release - hosts = cutils.get_upgradable_hosts(pecan.request.dbapi) - for host in hosts: - host_upgrade = objects.host_upgrade.get_by_host_id( - pecan.request.context, host.id) - if (host_upgrade.target_load != from_load.id or - host_upgrade.software_load != from_load.id): - raise wsme.exc.ClientSideError(_( - "upgrade-abort rejected: All hosts must be downgraded " - "before the upgrade can be aborted.")) - - current_abort_state = upgrade.state - - upgrade = pecan.request.dbapi.software_upgrade_update( - upgrade.uuid, {'state': constants.UPGRADE_ABORT_COMPLETING}) - - try: - pecan.request.rpcapi.complete_upgrade( - pecan.request.context, upgrade, current_abort_state) - except Exception as ex: - LOG.exception(ex) - pecan.request.dbapi.software_upgrade_update( - upgrade.uuid, {'state': current_abort_state}) - raise - - else: - raise wsme.exc.ClientSideError(_( - "upgrade-complete rejected: An upgrade can only be completed " - "when in the %s or %s state." % - (constants.UPGRADE_ACTIVATION_COMPLETE, - constants.UPGRADE_ABORTING))) - - return Upgrade.convert_with_links(upgrade) + raise NotImplementedError("This API is deprecated.") @wsme_pecan.wsexpose(wtypes.text, six.text_type) def in_upgrade(self, uuid): - # uuid is added here for potential future use - try: - upgrade = pecan.request.dbapi.software_upgrade_get_one() - - # We will wipe all the disks in the case of a host reinstall - # during a downgrade. - if upgrade.state in [constants.UPGRADE_ABORTING_ROLLBACK]: - LOG.info("in_upgrade status. Aborting upgrade, host reinstall") - return False - - except exception.NotFound: - return False - return True + raise NotImplementedError("This API is deprecated.") @wsme_pecan.wsexpose(wtypes.text, six.text_type) def upgrade_in_progress(self, uuid): - # uuid is added here for potential future use - try: - upgrade = pecan.request.dbapi.software_upgrade_get_one() - - # upgrade in progress only when upgrade starts and not abort - if upgrade.state and upgrade.state not in [ - constants.UPGRADE_ABORTING_ROLLBACK, - constants.UPGRADE_ABORTING, - constants.UPGRADE_ABORT_COMPLETING]: - return True - - except exception.NotFound: - return False - return False + raise NotImplementedError("This API is deprecated.") diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_load.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_load.py deleted file mode 100644 index a328626833..0000000000 --- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_load.py +++ /dev/null @@ -1,275 +0,0 @@ -# -# Copyright (c) 2023 Wind River Systems, Inc. 
-# -# SPDX-License-Identifier: Apache-2.0 -# - - -import os -import webtest.app - -from mock import patch -from mock import MagicMock -from sysinv.common import constants -from sysinv.tests.api import base -from sysinv.tests.db import utils -from sysinv.openstack.common.rpc import common - - -class FakeConductorAPI(object): - def __init__(self): - self.import_load = MagicMock() - self.delete_load = MagicMock() - self.start_import_load = MagicMock() - self.start_import_load.return_value = utils.create_test_load() - - -class TestLoad(base.FunctionalTest): - def setUp(self): - super(TestLoad, self).setUp() - - self.API_HEADERS = {'User-Agent': 'sysinv-test'} - - self.PATH_PREFIX = '/loads' - - conductor_api = patch('sysinv.conductor.rpcapiproxy.ConductorAPI') - self.mock_conductor_api = conductor_api.start() - self.fake_conductor_api = FakeConductorAPI() - self.mock_conductor_api.return_value = self.fake_conductor_api - self.addCleanup(conductor_api.stop) - - socket_gethostname = patch('socket.gethostname') - self.mock_socket_gethostname = socket_gethostname.start() - self.mock_socket_gethostname.return_value = 'controller-0' - self.addCleanup(socket_gethostname.stop) - - # TODO: Improve these unit test to don't mock this method. - upload_file = patch( - 'sysinv.api.controllers.v1.load.LoadController._upload_file' - ) - self.mock_upload_file = upload_file.start() - self.mock_upload_file.return_value = '/tmp/iso/' - self.addCleanup(upload_file.stop) - - -@patch('sysinv.common.utils.is_space_available', lambda x, y: True) -class TestLoadImport(TestLoad): - def setUp(self): - super(TestLoadImport, self).setUp() - - path_import = '%s/import_load' % self.PATH_PREFIX - iso = os.path.join( - os.path.dirname(__file__), "data", "bootimage.iso" - ) - sig = os.path.join( - os.path.dirname(__file__), "data", "bootimage.sig" - ) - - self.request_json = { - 'path': path_import, - 'params': { - 'path_to_iso': iso, - 'path_to_sig': sig, - 'active': 'false', - 'inactive': 'false', - }, - 'headers': self.API_HEADERS, - } - - upload_files = [('path_to_iso', iso), ('path_to_sig', sig)] - self.request_multiform = { - 'path': path_import, - 'params': {'active': 'false', 'inactive': 'false'}, - 'upload_files': upload_files, - 'headers': self.API_HEADERS, - 'expect_errors': False, - } - - def _assert_load(self, load): - self.assertEqual(load['software_version'], utils.SW_VERSION) - self.assertEqual(load['compatible_version'], 'N/A') - self.assertEqual(load['required_patches'], 'N/A') - self.assertEqual(load['state'], constants.ACTIVE_LOAD_STATE) - - def test_load_import(self): - response = self.post_with_files(**self.request_multiform) - - self._assert_load(response.json) - self.fake_conductor_api.start_import_load.assert_called_once() - self.fake_conductor_api.import_load.assert_called_once() - - def test_load_import_local(self): - response = self.post_json(**self.request_json) - - self._assert_load(response.json) - self.fake_conductor_api.start_import_load.assert_called_once() - self.fake_conductor_api.import_load.assert_called_once() - - def test_load_import_active(self): - isystem_get_one = self.dbapi.isystem_get_one - self.dbapi.isystem_get_one = MagicMock() - self.dbapi.isystem_get_one.return_value.distributed_cloud_role = \ - constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER - - self.request_multiform['params']['active'] = 'true' - response = self.post_with_files(**self.request_multiform) - - self.dbapi.isystem_get_one = isystem_get_one - - self._assert_load(response.json) - 
-        self.fake_conductor_api.start_import_load.assert_called_once()
-        self.fake_conductor_api.import_load.assert_not_called()
-
-    def test_load_import_inactive(self):
-        isystem_get_one = self.dbapi.isystem_get_one
-        self.dbapi.isystem_get_one = MagicMock()
-        self.dbapi.isystem_get_one.return_value.distributed_cloud_role = \
-            constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER
-
-        self.request_multiform['params']['inactive'] = 'true'
-        response = self.post_with_files(**self.request_multiform)
-
-        self.dbapi.isystem_get_one = isystem_get_one
-
-        self._assert_load(response.json)
-        self.fake_conductor_api.start_import_load.assert_called_once()
-        self.fake_conductor_api.import_load.assert_called_once()
-
-    def test_load_import_invalid_hostname(self):
-        self.mock_socket_gethostname.return_value = 'controller-1'
-
-        self.assertRaises(
-            webtest.app.AppError,
-            self.post_with_files,
-            **self.request_multiform,
-        )
-
-        self.fake_conductor_api.start_import_load.assert_not_called()
-        self.fake_conductor_api.import_load.assert_not_called()
-
-    def test_load_import_empty_request(self):
-        self.request_multiform['upload_files'] = None
-
-        self.assertRaises(
-            webtest.app.AppError,
-            self.post_with_files,
-            **self.request_multiform,
-        )
-
-        self.fake_conductor_api.start_import_load.assert_not_called()
-        self.fake_conductor_api.import_load.assert_not_called()
-
-    def test_load_import_missing_required_file(self):
-        self.request_multiform['upload_files'].pop()
-
-        self.assertRaises(
-            webtest.app.AppError,
-            self.post_with_files,
-            **self.request_multiform,
-        )
-
-        self.fake_conductor_api.start_import_load.assert_not_called()
-        self.fake_conductor_api.import_load.assert_not_called()
-
-    def test_load_import_failed_to_create_load_conductor(self):
-        self.fake_conductor_api.start_import_load.return_value = None
-
-        self.assertRaises(
-            webtest.app.AppError,
-            self.post_with_files,
-            **self.request_multiform,
-        )
-
-        self.fake_conductor_api.start_import_load.assert_called_once()
-        self.fake_conductor_api.import_load.assert_not_called()
-
-    def test_load_import_failed_to_import_load_conductor(self):
-        self.fake_conductor_api.import_load.side_effect = common.RemoteError()
-
-        self.assertRaises(
-            webtest.app.AppError,
-            self.post_with_files,
-            **self.request_multiform,
-        )
-
-        self.fake_conductor_api.start_import_load.assert_called_once()
-        self.fake_conductor_api.import_load.assert_called_once()
-
-
-class TestLoadDelete(TestLoad):
-    def setUp(self):
-        super(TestLoadDelete, self).setUp()
-
-        load_data = {
-            "software_version": "1.0",
-            "state": constants.INACTIVE_LOAD_STATE,
-        }
-
-        self.load = utils.create_test_load(**load_data)
-
-        self.request_json = {
-            'path': f'{self.PATH_PREFIX}/{self.load.id}',
-            'headers': self.API_HEADERS,
-        }
-
-    def tearDown(self):
-        super(TestLoadDelete, self).tearDown()
-
-    def test_load_delete(self):
-        isystem_get_one = self.dbapi.isystem_get_one
-        self.dbapi.isystem_get_one = MagicMock()
-        self.dbapi.isystem_get_one.return_value.distributed_cloud_role = None
-
-        response = self.delete(**self.request_json)
-
-        self.dbapi.isystem_get_one = isystem_get_one
-
-        self.assertEqual(response.status_int, 200)
-
-        self.fake_conductor_api.delete_load.assert_called_once()
-
-    def test_load_delete_used_by_software_upgrade(self):
-        software_upgrade_get_one = self.dbapi.software_upgrade_get_one
-
-        self.dbapi.software_upgrade_get_one = MagicMock()
-
-        upgrade = utils.create_test_upgrade(**{'to_load': self.load.id})
-
-        self.dbapi.software_upgrade_get_one.return_value = upgrade
-
-        self.assertRaises(
-            webtest.app.AppError,
-            self.delete,
-            **self.request_json,
-        )
-
-        self.dbapi.software_upgrade_get_one = software_upgrade_get_one
-
-        self.fake_conductor_api.delete_load.assert_not_called()
-
-    def test_load_delete_used_by_host(self):
-        self.dbapi.host_upgrade_get_list = MagicMock()
-
-        self.dbapi.host_upgrade_get_list.return_value = {"target_load": self.load.id}
-
-        self.assertRaises(
-            webtest.app.AppError,
-            self.delete,
-            **self.request_json,
-        )
-
-        self.fake_conductor_api.delete_load.assert_not_called()
-
-    def test_load_delete_invalid_state(self):
-        utils.update_test_load(
-            self.load.id,
-            **{'state': constants.IMPORTING_LOAD_STATE},
-        )
-
-        self.assertRaises(
-            webtest.app.AppError,
-            self.delete,
-            **self.request_json,
-        )
-
-        self.fake_conductor_api.delete_load.assert_not_called()
diff --git a/sysinv/sysinv/sysinv/sysinv/tests/api/test_upgrade.py b/sysinv/sysinv/sysinv/sysinv/tests/api/test_upgrade.py
deleted file mode 100644
index 5834cf2109..0000000000
--- a/sysinv/sysinv/sysinv/sysinv/tests/api/test_upgrade.py
+++ /dev/null
@@ -1,94 +0,0 @@
-#
-# Copyright (c) 2019 Wind River Systems, Inc.
-#
-# SPDX-License-Identifier: Apache-2.0
-#
-
-"""
-Tests for the API /upgrade/ methods.
-"""
-
-import mock
-from six.moves import http_client
-
-from sysinv.common import constants
-from sysinv.common import kubernetes
-
-from sysinv.tests.api import base
-from sysinv.tests.db import base as dbbase
-from sysinv.tests.db import utils as dbutils
-
-
-class FakeConductorAPI(object):
-
-    def __init__(self):
-        self.start_upgrade = mock.MagicMock()
-        self.get_system_health_return = (True, "System is super healthy")
-
-    def get_system_health(self, context, force=False, upgrade=False,
-                          kube_upgrade=False, alarm_ignore_list=None):
-        if force:
-            return True, "System is healthy because I was forced to say that"
-        else:
-            return self.get_system_health_return
-
-
-class TestUpgrade(base.FunctionalTest, dbbase.BaseSystemTestCase):
-
-    def setUp(self):
-        super(TestUpgrade, self).setUp()
-
-        # Mock the Conductor API
-        self.fake_conductor_api = FakeConductorAPI()
-        p = mock.patch('sysinv.conductor.rpcapiproxy.ConductorAPI')
-        self.mock_conductor_api = p.start()
-        self.mock_conductor_api.return_value = self.fake_conductor_api
-        self.addCleanup(p.stop)
-
-        # Behave as if the API is running on controller-0
-        p = mock.patch('socket.gethostname')
-        self.mock_socket_gethostname = p.start()
-        self.mock_socket_gethostname.return_value = 'controller-0'
-        self.addCleanup(p.stop)
-
-
-class TestPostUpgrade(TestUpgrade, dbbase.ControllerHostTestCase):
-
-    def test_create(self):
-        # Create the to load
-        dbutils.create_test_load(software_version=dbutils.SW_VERSION_NEW,
-                                 compatible_version=dbutils.SW_VERSION,
-                                 state=constants.IMPORTED_LOAD_STATE)
-
-        # Test creation of upgrade
-        create_dict = dbutils.get_test_upgrade()
-        result = self.post_json('/upgrade', create_dict,
-                                headers={'User-Agent': 'sysinv-test'})
-
-        # Verify that the upgrade was started
-        self.fake_conductor_api.start_upgrade.assert_called_once()
-
-        # Verify that the upgrade has the expected attributes
-        self.assertEqual(result.json['from_release'], dbutils.SW_VERSION)
-        self.assertEqual(result.json['to_release'], dbutils.SW_VERSION_NEW)
-        self.assertEqual(result.json['state'], constants.UPGRADE_STARTING)
-
-    def test_create_kube_upgrade_exists(self):
-        # Test creation of upgrade when a kubernetes upgrade exists
-        dbutils.create_test_kube_upgrade(
-            from_version='v1.42.1',
-            to_version='v1.42.2',
-            state=kubernetes.KUBE_UPGRADING_FIRST_MASTER,
-        )
-
-        # Test creation of upgrade
-        create_dict = dbutils.get_test_upgrade()
-        result = self.post_json('/upgrade', create_dict,
-                                headers={'User-Agent': 'sysinv-test'},
-                                expect_errors=True)
-
-        # Verify the failure
-        self.assertEqual(result.content_type, 'application/json')
-        self.assertEqual(http_client.BAD_REQUEST, result.status_int)
-        self.assertIn("cannot be done while a kubernetes upgrade",
-                      result.json['error_message'])