From 578f24bf18007bc90f6c03a3f1a70a9743a8f81c Mon Sep 17 00:00:00 2001
From: Julia Kreger
Date: Wed, 19 Jun 2024 09:46:50 -0700
Subject: [PATCH] Remove deprecated idrac wsman driver interfaces

Change-Id: I70738db25fdf9902575ac92195c3a40f1d7a0976
---
 doc/source/admin/drivers/idrac.rst | 356 +--
 driver-requirements.txt | 1 -
 ironic/drivers/drac.py | 22 +-
 ironic/drivers/modules/drac/bios.py | 621 -----
 ironic/drivers/modules/drac/common.py | 117 -
 ironic/drivers/modules/drac/inspect.py | 236 +-
 ironic/drivers/modules/drac/job.py | 116 -
 ironic/drivers/modules/drac/management.py | 385 +--
 ironic/drivers/modules/drac/power.py | 243 --
 ironic/drivers/modules/drac/raid.py | 1495 ------------
 .../drivers/modules/drac/vendor_passthru.py | 179 --
 ironic/tests/unit/db/utils.py | 6 -
 .../unit/drivers/modules/drac/test_bios.py | 647 -----
 .../unit/drivers/modules/drac/test_common.py | 125 -
 .../unit/drivers/modules/drac/test_inspect.py | 491 ----
 .../unit/drivers/modules/drac/test_job.py | 175 --
 .../drivers/modules/drac/test_management.py | 803 +-----
 .../modules/drac/test_periodic_task.py | 458 ----
 .../unit/drivers/modules/drac/test_power.py | 212 --
 .../unit/drivers/modules/drac/test_raid.py | 2150 -----------------
 .../tests/unit/drivers/modules/drac/utils.py | 42 +-
 ironic/tests/unit/drivers/test_drac.py | 45 +-
 .../drivers/third_party_driver_mock_specs.py | 30 -
 .../unit/drivers/third_party_driver_mocks.py | 45 -
 ...precated-idrac-wsman-c31861acd8742eb9.yaml | 5 +
 setup.cfg | 11 -
 26 files changed, 64 insertions(+), 8952 deletions(-)
 delete mode 100644 ironic/drivers/modules/drac/common.py
 delete mode 100644 ironic/drivers/modules/drac/job.py
 delete mode 100644 ironic/tests/unit/drivers/modules/drac/test_bios.py
 delete mode 100644 ironic/tests/unit/drivers/modules/drac/test_common.py
 delete mode 100644 ironic/tests/unit/drivers/modules/drac/test_job.py
 delete mode 100644 ironic/tests/unit/drivers/modules/drac/test_periodic_task.py
 delete mode 100644 ironic/tests/unit/drivers/modules/drac/test_power.py
 create mode 100644 releasenotes/notes/remove-deprecated-idrac-wsman-c31861acd8742eb9.yaml

diff --git a/doc/source/admin/drivers/idrac.rst b/doc/source/admin/drivers/idrac.rst
index dd6d87628d..764b2d1494 100644
--- a/doc/source/admin/drivers/idrac.rst
+++ b/doc/source/admin/drivers/idrac.rst
@@ -5,17 +5,12 @@ iDRAC driver
 Overview
 ========
 
-.. warning::
-   The ``-wsman`` driver interfaces have been deprecated and are anticipated
-   to be removed from Ironic at some point during or after the 2024.2
-   development cycle. The anticipated forward management path is to migrate
-   to the ``-redfish`` driver interfaces or the ``redfish`` hardware type.
-
 The integrated Dell Remote Access Controller (iDRAC_) is an out-of-band
 management platform on Dell EMC servers, and is supported directly by
-the ``idrac`` hardware type. This driver uses the Dell Web Services for
-Management (WSMAN) protocol and the standard Distributed Management Task
-Force (DMTF) Redfish protocol to perform all of its functions.
+the ``idrac`` hardware type. This driver utilizes the Distributed
+Management Task Force (DMTF) Redfish protocol to perform all of its
+functions. In older versions of Ironic, this driver leveraged the
+Web Services for Management (WSMAN) protocol.
 
 iDRAC_ hardware is also supported by the generic ``ipmi`` and ``redfish``
 hardware types, though with smaller feature sets.
@@ -38,19 +33,12 @@ The ``idrac`` hardware type supports the following Ironic interfaces: * `Management Interface`_: Boot device and firmware management * Power Interface: Power management * `RAID Interface`_: RAID controller and disk management -* `Vendor Interface`_: BIOS management (WSMAN) and eject virtual media - (Redfish) +* `Vendor Interface`_: eject virtual media (Redfish) Prerequisites ------------- -The ``idrac`` hardware type requires the ``python-dracclient`` library -to be installed on the ironic conductor node(s) if an Ironic node is -configured to use an ``idrac-wsman`` interface implementation, for example:: - - sudo pip install 'python-dracclient>=3.1.0' - -Additionally, the ``idrac`` hardware type requires the ``sushy`` library +The ``idrac`` hardware type requires the ``sushy`` library to be installed on the ironic conductor node(s) if an Ironic node is configured to use an ``idrac-redfish`` interface implementation, for example:: @@ -59,28 +47,24 @@ configured to use an ``idrac-redfish`` interface implementation, for example:: Enabling -------- -The iDRAC driver supports WSMAN for the bios, inspect, management, power, -raid, and vendor interfaces. In addition, it supports Redfish for -the bios, inspect, management, power, and raid interfaces. The iDRAC driver -allows you to mix and match WSMAN and Redfish interfaces. +The iDRAC driver supports Redfish for the bios, inspect, management, power, +and raid interfaces. -The ``idrac-wsman`` implementation must be enabled to use WSMAN for -an interface. The ``idrac-redfish`` implementation must be enabled +The ``idrac-redfish`` implementation must be enabled to use Redfish for an interface. -To enable the ``idrac`` hardware type with the minimum interfaces, -all using WSMAN, add the following to your ``/etc/ironic/ironic.conf``: +To enable the ``idrac`` hardware type, add the following to your +``/etc/ironic/ironic.conf``: .. code-block:: ini [DEFAULT] enabled_hardware_types=idrac - enabled_management_interfaces=idrac-wsman - enabled_power_interfaces=idrac-wsman + enabled_management_interfaces=idrac-redfish + enabled_power_interfaces=idrac-redfish -To enable all optional features (BIOS, inspection, RAID, and vendor passthru) -using Redfish where it is supported and WSMAN where not, use the -following configuration: +To enable all optional features (BIOS, inspection, RAID, and vendor passthru), +use the following configuration: .. 
code-block:: ini @@ -100,43 +84,30 @@ order: ================ =================================================== Interface Supported Implementations ================ =================================================== -``bios`` ``idrac-wsman``, ``idrac-redfish``, ``no-bios`` +``bios`` ``idrac-redfish``, ``no-bios`` ``boot`` ``ipxe``, ``pxe``, ``idrac-redfish-virtual-media`` ``console`` ``no-console`` ``deploy`` ``direct``, ``ansible``, ``ramdisk`` ``firmware`` ``redfish``, ``no-firmware`` -``inspect`` ``idrac-wsman``, ``idrac``, ``idrac-redfish``, +``inspect`` ``idrac-redfish``, ``inspector``, ``no-inspect`` -``management`` ``idrac-wsman``, ``idrac``, ``idrac-redfish`` +``management`` ``idrac-redfish`` ``network`` ``flat``, ``neutron``, ``noop`` -``power`` ``idrac-wsman``, ``idrac``, ``idrac-redfish`` -``raid`` ``idrac-wsman``, ``idrac``, ``idrac-redfish``, ``no-raid`` +``power`` ``idrac-redfish`` +``raid`` ``idrac-redfish``, ``no-raid`` ``rescue`` ``no-rescue``, ``agent`` ``storage`` ``noop``, ``cinder``, ``external`` -``vendor`` ``idrac-wsman``, ``idrac``, ``idrac-redfish``, +``vendor`` ``idrac-redfish``, ``no-vendor`` ================ =================================================== -.. NOTE:: - ``idrac`` is the legacy name of the WSMAN interface. It has been - deprecated in favor of ``idrac-wsman`` and may be removed in a - future release. - Protocol-specific Properties ---------------------------- -The WSMAN and Redfish protocols require different properties to be specified +The Redfish protocols require different properties to be specified in the Ironic node's ``driver_info`` field to communicate with the bare metal system's iDRAC. -The WSMAN protocol requires the following properties: - -* ``drac_username``: The WSMAN user name to use when communicating - with the iDRAC. Usually ``root``. -* ``drac_password``: The password for the WSMAN user to use when - communicating with the iDRAC. -* ``drac_address``: The IP address of the iDRAC. - The Redfish protocol requires the following properties: * ``redfish_username``: The Redfish user name to use when @@ -151,25 +122,9 @@ The Redfish protocol requires the following properties: For other Redfish protocol parameters see :doc:`/admin/drivers/redfish`. -If using only interfaces which use WSMAN (``idrac-wsman``), then only -the WSMAN properties must be supplied. If using only interfaces which -use Redfish (``idrac-redfish``), then only the Redfish properties must be -supplied. If using a mix of interfaces, where some use WSMAN and others -use Redfish, both the WSMAN and Redfish properties must be supplied. - Enrolling --------- -The following command enrolls a bare metal node with the ``idrac`` -hardware type using WSMAN for all interfaces: - -.. code-block:: bash - - baremetal node create --driver idrac \ - --driver-info drac_username=user \ - --driver-info drac_password=pa$$w0rd \ - --driver-info drac_address=drac.host - The following command enrolls a bare metal node with the ``idrac`` hardware type using Redfish for all interfaces: @@ -187,29 +142,6 @@ hardware type using Redfish for all interfaces: --raid-interface idrac-redfish \ --vendor-interface idrac-redfish -The following command enrolls a bare metal node with the ``idrac`` -hardware type assuming a mix of Redfish and WSMAN interfaces are used: - -.. 
code-block:: bash - - baremetal node create --driver idrac \ - --driver-info drac_username=user \ - --driver-info drac_password=pa$$w0rd - --driver-info drac_address=drac.host \ - --driver-info redfish_username=user \ - --driver-info redfish_password=pa$$w0rd \ - --driver-info redfish_address=drac.host \ - --driver-info redfish_system_id=/redfish/v1/Systems/System.Embedded.1 \ - --bios-interface idrac-redfish \ - --inspect-interface idrac-redfish \ - --management-interface idrac-redfish \ - --power-interface idrac-redfish - -.. NOTE:: - If using WSMAN for the management interface, then WSMAN must be used - for the power interface. The same applies to Redfish. It is currently not - possible to use Redfish for one and WSMAN for the other. - BIOS Interface ============== @@ -252,7 +184,7 @@ Inspect Interface The Dell iDRAC out-of-band inspection process catalogs all the same attributes of the server as the IPMI driver. Unlike IPMI, it does this without requiring the system to be rebooted, or even to be powered on. -Inspection is performed using the Dell WSMAN or Redfish protocol directly +Inspection is performed using the Redfish protocol directly without affecting the operation of the system being inspected. The inspection discovers the following properties: @@ -267,8 +199,6 @@ Extra capabilities: * ``pci_gpu_devices``: number of GPU devices connected to the bare metal. It also creates baremetal ports for each NIC port detected in the system. -The ``idrac-wsman`` inspect interface discovers which NIC ports are -configured to PXE boot and sets ``pxe_enabled`` to ``True`` on those ports. The ``idrac-redfish`` inspect interface does not currently set ``pxe_enabled`` on the ports. The user should ensure that ``pxe_enabled`` is set correctly on the ports following inspection with the ``idrac-redfish`` inspect interface. @@ -487,7 +417,7 @@ Compared to ``redfish`` RAID interface, using ``idrac-redfish`` adds: * Converting non-RAID disks to RAID mode if there are any, * Clearing foreign configuration, if any, after deleting virtual disks. -The following properties are supported by the iDRAC WSMAN and Redfish RAID +The following properties are supported by the Redfish RAID interface implementation: .. NOTE:: @@ -633,223 +563,6 @@ Or using ``sushy`` with Redfish: Vendor Interface ================ -idrac-wsman ------------ - -Dell iDRAC BIOS management is available through the Ironic WSMAN vendor -passthru interface. - -======================== ============ ====================================== -Method Name HTTP Method Description -======================== ============ ====================================== -``abandon_bios_config`` ``DELETE`` Abandon a BIOS configuration job. -``commit_bios_config`` ``POST`` Commit a BIOS configuration job - submitted through ``set_bios_config``. - Required argument: ``reboot`` - - indicates whether a reboot job - should be automatically created - with the config job. Returns a - dictionary containing the ``job_id`` - key with the ID of the newly created - config job, and the - ``reboot_required`` key indicating - whether the node needs to be rebooted - to execute the config job. -``get_bios_config`` ``GET`` Returns a dictionary containing the - node's BIOS settings. -``list_unfinished_jobs`` ``GET`` Returns a dictionary containing - the key ``unfinished_jobs``; its value - is a list of dictionaries. Each - dictionary represents an unfinished - config job object. -``set_bios_config`` ``POST`` Change the BIOS configuration on - a node. 
Required argument: a - dictionary of {``AttributeName``: - ``NewValue``}. Returns a dictionary - containing the ``is_commit_required`` - key indicating whether - ``commit_bios_config`` needs to be - called to apply the changes and the - ``is_reboot_required`` value - indicating whether the server must - also be rebooted. Possible values are - ``true`` and ``false``. -======================== ============ ====================================== - - -Examples -^^^^^^^^ - -Get BIOS Config -~~~~~~~~~~~~~~~ - -.. code-block:: bash - - baremetal node passthru call --http-method GET get_bios_config - -Snippet of output showing virtualization enabled: - -.. code-block:: json - - {"ProcVirtualization": { - "current_value": "Enabled", - "instance_id": "BIOS.Setup.1-1:ProcVirtualization", - "name": "ProcVirtualization", - "pending_value": null, - "possible_values": [ - "Enabled", - "Disabled"], - "read_only": false }} - -There are a number of items to note from the above snippet: - -* ``name``: this is the name to use in a call to ``set_bios_config``. -* ``current_value``: the current state of the setting. -* ``pending_value``: if the value has been set, but not yet committed, - the new value is shown here. The change can either be committed or - abandoned. -* ``possible_values``: shows a list of valid values which can be used - in a call to ``set_bios_config``. -* ``read_only``: indicates if the value is capable of being changed. - -Set BIOS Config -~~~~~~~~~~~~~~~ - -.. code-block:: bash - - baremetal node passthru call set_bios_config --arg "name=value" - - -Walkthrough of performing a BIOS configuration change: - -The following section demonstrates how to change BIOS configuration settings, -detect that a commit and reboot are required, and act on them accordingly. The -two properties that are being changed are: - -* Enable virtualization technology of the processor -* Globally enable SR-IOV - -.. code-block:: bash - - baremetal node passthru call set_bios_config \ - --arg "ProcVirtualization=Enabled" \ - --arg "SriovGlobalEnable=Enabled" - -This returns a dictionary indicating what actions are required next: - -.. code-block:: json - - { - "is_reboot_required": true, - "is_commit_required": true - } - - -Commit BIOS Changes -~~~~~~~~~~~~~~~~~~~ - -The next step is to commit the pending change to the BIOS. Note that in this -example, the ``reboot`` argument is set to ``true``. The response indicates -that a reboot is no longer required as it has been scheduled automatically -by the ``commit_bios_config`` call. If the reboot argument is not supplied, -the job is still created, however it remains in the ``scheduled`` state -until a reboot is performed. The reboot can be initiated through the -Ironic power API. - -.. code-block:: bash - - baremetal node passthru call commit_bios_config \ - --arg "reboot=true" - -.. code-block:: json - - { - "job_id": "JID_499377293428", - "reboot_required": false - } - -The state of any executing job can be queried: - -.. code-block:: bash - - baremetal node passthru call --http-method GET list_unfinished_jobs - - -.. code-block:: json - - {"unfinished_jobs": - [{"status": "Scheduled", - "name": "ConfigBIOS:BIOS.Setup.1-1", - "until_time": "TIME_NA", - "start_time": "TIME_NOW", - "message": "Task successfully scheduled.", - "percent_complete": "0", - "id": "JID_499377293428"}]} - - -Abandon BIOS Changes -~~~~~~~~~~~~~~~~~~~~ - -Instead of committing, a pending change can be abandoned: - -.. 
code-block:: bash - - baremetal node passthru call --http-method DELETE abandon_bios_config - -The abandon command does not provide a response body. - - -Change Boot Mode -^^^^^^^^^^^^^^^^ - -The boot mode of the iDRAC can be changed to: - -* BIOS - Also called legacy or traditional boot mode. The BIOS initializes the - system’s processors, memory, bus controllers, and I/O devices. After - initialization is complete, the BIOS passes control to operating system (OS) - software. The OS loader uses basic services provided by the system BIOS to - locate and load OS modules into system memory. After booting the system, the - BIOS and embedded management controllers execute system management - algorithms, which monitor and optimize the condition of the underlying - hardware. BIOS configuration settings enable fine-tuning of the - performance, power management, and reliability features of the system. -* UEFI - The Unified Extensible Firmware Interface does not change the - traditional purposes of the system BIOS. To a large extent, a UEFI-compliant - BIOS performs the same initialization, boot, configuration, and management - tasks as a traditional BIOS. However, UEFI does change the interfaces and - data structures the BIOS uses to interact with I/O device firmware and - operating system software. The primary intent of UEFI is to eliminate - shortcomings in the traditional BIOS environment, enabling system firmware to - continue scaling with industry trends. - -The UEFI boot mode offers: - -* Improved partitioning scheme for boot media -* Support for media larger than 2 TB -* Redundant partition tables -* Flexible handoff from BIOS to OS -* Consolidated firmware user interface -* Enhanced resource allocation for boot device firmware - -The boot mode can be changed via the WSMAN vendor passthru interface as -follows: - -.. code-block:: bash - - baremetal node passthru call set_bios_config \ - --arg "BootMode=Uefi" - - baremetal node passthru call commit_bios_config \ - --arg "reboot=true" - -.. code-block:: bash - - baremetal node passthru call set_bios_config \ - --arg "BootMode=Bios" - - baremetal node passthru call commit_bios_config \ - --arg "reboot=true" - idrac-redfish ------------- @@ -896,27 +609,6 @@ settings. .. _Ironic_RAID: https://docs.openstack.org/ironic/latest/admin/raid.html .. _iDRAC: https://www.dell.com/idracmanuals -WSMAN vendor passthru timeout ------------------------------ - -When iDRAC is not ready and executing WSMAN vendor passthru commands, they take -more time as waiting for iDRAC to become ready again and then time out, -for example: - -.. code-block:: bash - - baremetal node passthru call --http-method GET \ - aed58dca-1b25-409a-a32f-3a817d59e1e0 list_unfinished_jobs - Timed out waiting for a reply to message ID 547ce7995342418c99ef1ea4a0054572 (HTTP 500) - -To avoid this need to increase timeout for messaging in ``/etc/ironic/ironic.conf`` -and restart Ironic API service. - -.. 
code-block:: ini - - [DEFAULT] - rpc_response_timeout = 600 - Timeout when powering off ------------------------- diff --git a/driver-requirements.txt b/driver-requirements.txt index 1b8cb28a22..bd3d9dc4bc 100644 --- a/driver-requirements.txt +++ b/driver-requirements.txt @@ -9,7 +9,6 @@ pysnmp-lextudio>=5.0.0 # BSD pyasn1>=0.5.1 # BSD pyasn1-modules>=0.3.0 # BSD python-scciclient>=0.16.0,<0.17.0 -python-dracclient>=5.1.0,<9.0.0 # Ansible-deploy interface ansible>=2.7 diff --git a/ironic/drivers/drac.py b/ironic/drivers/drac.py index a4ca8004b3..5c8198a9a8 100644 --- a/ironic/drivers/drac.py +++ b/ironic/drivers/drac.py @@ -47,20 +47,19 @@ class IDRACHardware(generic.GenericHardware): @property def supported_management_interfaces(self): """List of supported management interfaces.""" - return [management.DracWSManManagement, management.DracManagement, - management.DracRedfishManagement] + return [management.DracRedfishManagement] @property def supported_power_interfaces(self): """List of supported power interfaces.""" - return [power.DracWSManPower, power.DracPower, power.DracRedfishPower] + return [power.DracRedfishPower] # Optional hardware interfaces @property def supported_bios_interfaces(self): """List of supported bios interfaces.""" - return [bios.DracWSManBIOS, bios.DracRedfishBIOS, noop.NoBIOS] + return [bios.DracRedfishBIOS, noop.NoBIOS] @property def supported_firmware_interfaces(self): @@ -72,20 +71,17 @@ class IDRACHardware(generic.GenericHardware): # Inspector support should have a higher priority than NoInspect # if it is enabled by an operator (implying that the service is # installed). - return [drac_inspect.DracWSManInspect, drac_inspect.DracInspect, - drac_inspect.DracRedfishInspect] + super( - IDRACHardware, self).supported_inspect_interfaces + return [drac_inspect.DracRedfishInspect] + super( + IDRACHardware, self).supported_inspect_interfaces @property def supported_raid_interfaces(self): """List of supported raid interfaces.""" - return [raid.DracWSManRAID, raid.DracRAID, - raid.DracRedfishRAID] + super( - IDRACHardware, self).supported_raid_interfaces + return [raid.DracRedfishRAID] + super( + IDRACHardware, self).supported_raid_interfaces @property def supported_vendor_interfaces(self): """List of supported vendor interfaces.""" - return [vendor_passthru.DracWSManVendorPassthru, - vendor_passthru.DracVendorPassthru, - vendor_passthru.DracRedfishVendorPassthru, noop.NoVendor] + return [vendor_passthru.DracRedfishVendorPassthru, + noop.NoVendor] diff --git a/ironic/drivers/modules/drac/bios.py b/ironic/drivers/modules/drac/bios.py index 8e537435ca..0af607912e 100644 --- a/ironic/drivers/modules/drac/bios.py +++ b/ironic/drivers/modules/drac/bios.py @@ -15,31 +15,7 @@ DRAC BIOS configuration specific methods """ -from ironic_lib import metrics_utils -from oslo_log import log as logging -from oslo_utils import importutils -from oslo_utils import timeutils - -from ironic.common import exception -from ironic.common.i18n import _ -from ironic.conductor import periodics -from ironic.conductor import utils as manager_utils -from ironic.conf import CONF -from ironic.drivers import base -from ironic.drivers.modules import deploy_utils -from ironic.drivers.modules.drac import common as drac_common -from ironic.drivers.modules.drac import job as drac_job from ironic.drivers.modules.redfish import bios as redfish_bios -from ironic import objects - -drac_client = importutils.try_import('dracclient.client') -drac_exceptions = importutils.try_import('dracclient.exceptions') 
-drac_uris = importutils.try_import('dracclient.resources.uris') -drac_utils = importutils.try_import('dracclient.utils') - -LOG = logging.getLogger(__name__) - -METRICS = metrics_utils.get_metrics_logger(__name__) class DracRedfishBIOS(redfish_bios.RedfishBIOS): @@ -50,600 +26,3 @@ class DracRedfishBIOS(redfish_bios.RedfishBIOS): specific incompatibilities and introduction of vendor value added should be implemented by this class. """ - - -class DracWSManBIOS(base.BIOSInterface): - """BIOSInterface Implementation for iDRAC.""" - - # NOTE(TheJulia): Deprecating November 2023 in favor of Redfish - # and due to a lack of active driver maintenance. - supported = False - - # argsinfo dict for BIOS clean/deploy steps - _args_info = { - "settings": { - "description": "List of BIOS settings to apply", - "required": True - } - } - - def __init__(self): - super(DracWSManBIOS, self).__init__() - if drac_exceptions is None: - raise exception.DriverLoadError( - driver='idrac', - reason=_("Unable to import dracclient.exceptions library")) - - @METRICS.timer('DracWSManBIOS.apply_configuration') - @base.clean_step(priority=0, argsinfo=_args_info, requires_ramdisk=False) - @base.deploy_step(priority=0, argsinfo=_args_info) - def apply_configuration(self, task, settings): - """Apply the BIOS configuration to the node - - :param task: a TaskManager instance containing the node to act on - :param settings: List of BIOS settings to apply - :raises: DRACOperationError upon an error from python-dracclient - - :returns: states.CLEANWAIT (cleaning) or states.DEPLOYWAIT (deployment) - if configuration is in progress asynchronously or None if it - is completed. - """ - - LOG.debug("Configuring node %(node_uuid)s with BIOS settings:" - " %(settings)s", {"node_uuid": task.node.uuid, - "settings": settings}) - node = task.node - # convert ironic settings list to DRAC kwsettings - kwsettings = {s['name']: s['value'] for s in settings} - drac_job.validate_job_queue(node) - client = drac_common.get_drac_client(node) - try: - # Argument validation is done by the dracclient method - # set_bios_settings. No need to do it here. - set_result = client.set_bios_settings(kwsettings) - except drac_exceptions.BaseClientException as exc: - LOG.error("Failed to apply BIOS config on node %(node_uuid)s." - " Error %(error)s", {"node_uuid": task.node.uuid, - "error": exc}) - raise exception.DracOperationError(error=exc) - - # If no commit is required, we're done - if not set_result['is_commit_required']: - LOG.info("Completed BIOS configuration on node %(node_uuid)s" - " with BIOS settings: %(settings)s", - { - "node_uuid": task.node.uuid, - "settings": settings - }) - return - - # Otherwise, need to reboot the node as well to commit configuration - else: - LOG.debug("Rebooting node %(node_uuid)s to apply BIOS settings", - {"node_uuid": task.node.uuid}) - reboot_needed = set_result['is_reboot_required'] - try: - commit_result = client.commit_pending_bios_changes( - reboot=reboot_needed) - except drac_exceptions.BaseClientException as exc: - LOG.error("Failed to commit BIOS changes on node %(node_uuid)s" - ". 
Error %(error)s", {"node_uuid": task.node.uuid, - "error": exc}) - raise exception.DracOperationError(error=exc) - - # Store JobID for the async job handler _check_node_bios_jobs - bios_config_job_ids = node.driver_internal_info.get( - 'bios_config_job_ids', []) - bios_config_job_ids.append(commit_result) - node.set_driver_internal_info('bios_config_job_ids', - bios_config_job_ids) - - # This method calls node.save(), bios_config_job_ids will then be - # saved. - # These flags are for the conductor to manage the asynchronous - # jobs that have been initiated by this method - deploy_utils.set_async_step_flags( - node, - reboot=reboot_needed, - skip_current_step=True, - polling=True) - # Return the clean/deploy state string - return deploy_utils.get_async_step_return_state(node) - - @METRICS.timer('DracWSManBIOS._query_bios_config_job_status') - # TODO(noor): Consider patch of CONF to add an entry for BIOS query - # spacing since BIOS jobs could be comparatively shorter in time than - # RAID ones currently using the raid spacing to avoid errors - # spacing parameter for periodic method - @periodics.node_periodic( - purpose='checking async bios configuration jobs', - spacing=CONF.drac.query_raid_config_job_status_interval, - filters={'reserved': False, 'maintenance': False}, - predicate_extra_fields=['driver_internal_info'], - predicate=lambda n: ( - n.driver_internal_info.get('bios_config_job_ids') - or n.driver_internal_info.get('factory_reset_time_before_reboot')), - ) - def _query_bios_config_job_status(self, task, manager, context): - """Periodic task to check the progress of running BIOS config jobs. - - :param manager: an instance of Ironic Conductor Manager with - the node list to act on - :param context: context of the request, needed when acquiring - a lock on a node. For access control. - """ - # check bios_config_job_id exist & checks job is completed - if task.node.driver_internal_info.get("bios_config_job_ids"): - self._check_node_bios_jobs(task) - - if task.node.driver_internal_info.get( - "factory_reset_time_before_reboot"): - self._check_last_system_inventory_changed(task) - - def _check_last_system_inventory_changed(self, task): - """Check the progress of last system inventory time of a node. - - This handles jobs for BIOS factory reset. Handle means, - it checks for job status to not only signify completed jobs but - also handle failures by invoking the 'fail' event, allowing the - conductor to put the node into clean/deploy FAIL state. 
- - :param task: a TaskManager instance with the node to act on - """ - node = task.node - client = drac_common.get_drac_client(node) - # Get the last system inventory time from node before reboot - factory_reset_time_before_reboot = node.driver_internal_info.get( - 'factory_reset_time_before_reboot') - - # Get the factory reset start time - factory_reset_time = node.driver_internal_info.get( - 'factory_reset_time') - LOG.debug("Factory resetting node %(node_uuid)s factory reset time " - " %(factory_reset_time)s", {"node_uuid": task.node.uuid, - "factory_reset_time": - factory_reset_time}) - # local variable to track difference between current time and factory - # reset start time - time_difference = 0 - # Get the last system inventory time after reboot - factory_reset_time_endof_reboot = (client.get_system() - .last_system_inventory_time) - - LOG.debug("Factory resetting node %(node_uuid)s " - "last inventory reboot time after factory reset " - "%(factory_reset_time_endof_reboot)s", - {"node_uuid": task.node.uuid, - "factory_reset_time_endof_reboot": - factory_reset_time_endof_reboot}) - - if factory_reset_time_before_reboot != factory_reset_time_endof_reboot: - # from the database cleanup with factory reset time - self._delete_cached_reboot_time(node) - # Cache the new BIOS settings, - self.cache_bios_settings(task) - self._resume_current_operation(task) - else: - # Calculate difference between current time and factory reset - # start time if it is more than configured timeout then set - # the node to fail state - time = timeutils.utcnow(with_timezone=True - ) - timeutils.parse_isotime(str( - factory_reset_time)) - time_difference = time.total_seconds() - LOG.debug("Factory resetting node %(node_uuid)s " - "time difference %(time_difference)s ", - {"node_uuid": task.node.uuid, "time_difference": - time_difference}) - - if time_difference > CONF.drac.bios_factory_reset_timeout: - task.upgrade_lock() - self._delete_cached_reboot_time(node) - error_message = ("BIOS factory reset was not completed within " - "{} seconds, unable to cache updated bios " - "setting").format( - CONF.drac.bios_factory_reset_timeout) - self._set_failed(task, error_message) - else: - LOG.debug("Factory reset for a node %(node)s is not done " - "will check again later", {'node': task.node.uuid}) - - def _check_node_bios_jobs(self, task): - """Check the progress of running BIOS config jobs of a node. - - This handles jobs for BIOS set and reset. Handle means, - it checks for job status to not only signify completed jobs but - also handle failures by invoking the 'fail' event, allowing the - conductor to put the node into clean/deploy FAIL state. 
- - :param task: a TaskManager instance with the node to act on - """ - node = task.node - bios_config_job_ids = node.driver_internal_info['bios_config_job_ids'] - finished_job_ids = [] - # local variable to track job failures - job_failed = False - - for config_job_id in bios_config_job_ids: - config_job = drac_job.get_job(node, job_id=config_job_id) - - if config_job is None or config_job.status == 'Completed': - finished_job_ids.append(config_job_id) - elif (config_job.status == 'Failed' - or config_job.status == 'Completed with Errors'): - finished_job_ids.append(config_job_id) - job_failed = True - - # If no job has finished, return - if not finished_job_ids: - return - - # The finished jobs will require a node reboot, need to update the - # node lock to exclusive, allowing a destructive reboot operation - task.upgrade_lock() - # Cleanup the database with finished jobs, they're no longer needed - self._delete_cached_config_job_ids(node, finished_job_ids) - - if not job_failed: - # Cache the new BIOS settings, caching needs to happen here - # since the config steps are async. Decorator won't work. - self.cache_bios_settings(task) - # if no failure, continue with clean/deploy - self._resume_current_operation(task) - else: - # invoke 'fail' event to allow conductor to put the node in - # a clean/deploy fail state - error_message = ("Failed config job: {}. Message: '{}'.".format( - config_job.id, config_job.message)) - self._set_failed(task, error_message) - - def _delete_cached_config_job_ids(self, node, finished_job_ids=None): - """Remove Job IDs from the driver_internal_info table in database. - - :param node: an ironic node object - :param finished_job_ids: a list of finished Job ID strings to remove - """ - if finished_job_ids is None: - finished_job_ids = [] - # take out the unfinished job ids from all the jobs - unfinished_job_ids = [ - job_id for job_id - in node.driver_internal_info['bios_config_job_ids'] - if job_id not in finished_job_ids] - # assign the unfinished job ids back to the total list - # this will clear the finished jobs from the list - node.set_driver_internal_info('bios_config_job_ids', - unfinished_job_ids) - node.save() - - def _delete_cached_reboot_time(self, node): - """Remove factory time from the driver_internal_info table in database. - - :param node: an ironic node object - """ - # Remove the last reboot time and factory reset time - node.del_driver_internal_info('factory_reset_time_before_reboot') - node.del_driver_internal_info('factory_reset_time') - node.save() - - def _set_failed(self, task, error_message): - """Set the node in failed state by invoking 'fail' event. - - :param task: a TaskManager instance with node to act on - :param error_message: Error message - """ - log_msg = ("BIOS configuration failed for node %(node)s. %(error)s " % - {'node': task.node.uuid, - 'error': error_message}) - if task.node.clean_step: - manager_utils.cleaning_error_handler(task, log_msg, error_message) - else: - manager_utils.deploying_error_handler(task, log_msg, error_message) - - def _resume_current_operation(self, task): - """Continue cleaning/deployment of the node. - - For asynchronous operations, it is necessary to notify the - conductor manager to continue the cleaning/deployment operation - after a job has finished. This is done through an RPC call. The - notify_conductor_resume_* wrapper methods provide that. 
- - :param task: a TaskManager instance with node to act on - """ - if task.node.clean_step: - manager_utils.notify_conductor_resume_clean(task) - else: - manager_utils.notify_conductor_resume_deploy(task) - - @METRICS.timer('DracWSManBIOS.factory_reset') - @base.clean_step(priority=0, requires_ramdisk=False) - @base.deploy_step(priority=0) - def factory_reset(self, task): - """Reset the BIOS settings of the node to the factory default. - - This uses the Lifecycle Controller configuration to perform - BIOS configuration reset. Leveraging the python-dracclient - methods already available. - - :param task: a TaskManager instance containing the node to act on - :raises: DracOperationError on an error from python-dracclient - :returns: states.CLEANWAIT (cleaning) or states.DEPLOYWAIT - (deployment) if reset is in progress asynchronously or None - if it is completed. - """ - node = task.node - drac_job.validate_job_queue(node) - client = drac_common.get_drac_client(node) - lc_bios_reset_attrib = { - "BIOS Reset To Defaults Requested": "True" - } - try: - set_result = client.set_lifecycle_settings(lc_bios_reset_attrib) - except drac_exceptions.BaseClientException as exc: - LOG.error('Failed to reset BIOS on the node %(node_uuid)s.' - ' Reason: %(error)s.', {'node_uuid': node.uuid, - 'error': exc}) - raise exception.DracOperationError(error=exc) - if not set_result['is_commit_required']: - LOG.info("BIOS reset successful on the node " - "%(node_uuid)s", {"node_uuid": node.uuid}) - return - else: - # Rebooting the Node is compulsory, LC call returns - # reboot_required=False/Optional, which is not desired - reboot_needed = True - try: - factory_reset_time_before_reboot =\ - client.get_system().last_system_inventory_time - - LOG.debug("Factory resetting node %(node_uuid)s " - "last inventory reboot time before factory reset " - "%(factory_reset_time_before_reboot)s", - {"node_uuid": task.node.uuid, - "factory_reset_time_before_reboot": - factory_reset_time_before_reboot}) - - commit_job_id = client.commit_pending_lifecycle_changes( - reboot=reboot_needed) - LOG.info("Commit job id of a node %(node_uuid)s." - "%(commit_job_id)s", {'node_uuid': node.uuid, - "commit_job_id": commit_job_id}) - except drac_exceptions.BaseClientException as exc: - LOG.error('Failed to commit BIOS reset on node ' - '%(node_uuid)s. Reason: %(error)s.', { - 'node_uuid': node.uuid, - 'error': exc}) - raise exception.DracOperationError(error=exc) - # Store the last inventory time on reboot for async job handler - # _check_last_system_inventory_changed - node.set_driver_internal_info('factory_reset_time_before_reboot', - factory_reset_time_before_reboot) - # Store the current time to later check if factory reset times out - node.timestamp_driver_internal_info('factory_reset_time') - - # rebooting the server to apply factory reset value - task.driver.power.reboot(task) - - # This method calls node.save(), bios_config_job_id will be - # saved automatically - # These flags are for the conductor to manage the asynchronous - # jobs that have been initiated by this method - deploy_utils.set_async_step_flags( - node, - reboot=reboot_needed, - skip_current_step=True, - polling=True) - - return deploy_utils.get_async_step_return_state(task.node) - - def cache_bios_settings(self, task): - """Store or update the current BIOS settings for the node. - - Get the current BIOS settings and store them in the bios_settings - database table. - - :param task: a TaskManager instance containing the node to act on. 
- :raises: DracOperationError on an error from python-dracclient - """ - node = task.node - node_id = node.id - node_uuid = node.uuid - - client = drac_common.get_drac_client(node) - - try: - kwsettings = client.list_bios_settings() - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to get the BIOS settings for node ' - '%(node_uuid)s. Reason: %(error)s.', - {'node_uuid': node.uuid, - 'error': exc}) - raise exception.DracOperationError(error=exc) - - # convert dracclient BIOS settings into ironic settings list - settings = [{"name": name, "value": attrib.current_value} - for name, attrib in kwsettings.items()] - - # Store them in the database table - LOG.debug('Caching BIOS settings for node %(node_uuid)s', { - 'node_uuid': node_uuid}) - create_list, update_list, delete_list, nochange_list = ( - objects.BIOSSettingList.sync_node_setting( - task.context, node_id, settings)) - - if create_list: - objects.BIOSSettingList.create( - task.context, node_id, create_list) - if update_list: - objects.BIOSSettingList.save( - task.context, node_id, update_list) - if delete_list: - delete_names = [d['name'] for d in delete_list] - objects.BIOSSettingList.delete( - task.context, node_id, delete_names) - - # BaseInterface methods implementation - def get_properties(self): - """Return the properties of the BIOS Interface - - :returns: dictionary of : entries - """ - return drac_common.COMMON_PROPERTIES - - def validate(self, task): - """Validates the driver-specific information used by the idrac BMC - - :param task: a TaskManager instance containing the node to act on - :raises: InvalidParameterValue if some mandatory information - is missing on the node or on invalid inputs - """ - drac_common.parse_driver_info(task.node) - - -def get_config(node): - """Get the BIOS configuration. - - The BIOS settings look like:: - - {'EnumAttrib': {'name': 'EnumAttrib', - 'current_value': 'Value', - 'pending_value': 'New Value', # could also be None - 'read_only': False, - 'possible_values': ['Value', 'New Value', 'None']}, - 'StringAttrib': {'name': 'StringAttrib', - 'current_value': 'Information', - 'pending_value': None, - 'read_only': False, - 'min_length': 0, - 'max_length': 255, - 'pcre_regex': '^[0-9A-Za-z]{0,255}$'}, - 'IntegerAttrib': {'name': 'IntegerAttrib', - 'current_value': 0, - 'pending_value': None, - 'read_only': True, - 'lower_bound': 0, - 'upper_bound': 65535}} - - :param node: an ironic node object. - :raises: DracOperationError on an error from python-dracclient. - :returns: a dictionary containing BIOS settings - - The above values are only examples, of course. BIOS attributes exposed via - this API will always be either an enumerated attribute, a string attribute, - or an integer attribute. All attributes have the following parameters: - - :param name: is the name of the BIOS attribute. - :param current_value: is the current value of the attribute. - It will always be either an integer or a string. - :param pending_value: is the new value that we want the attribute to have. - None means that there is no pending value. - :param read_only: indicates whether this attribute can be changed. - Trying to change a read-only value will result in - an error. The read-only flag can change depending - on other attributes. - A future version of this call may expose the - dependencies that indicate when that may happen. - - Enumerable attributes also have the following parameters: - - :param possible_values: is an array of values it is permissible to set - the attribute to. 
- - String attributes also have the following parameters: - - :param min_length: is the minimum length of the string. - :param max_length: is the maximum length of the string. - :param pcre_regex: is a PCRE compatible regular expression that the string - must match. It may be None if the string is read only - or if the string does not have to match any particular - regular expression. - - Integer attributes also have the following parameters: - - :param lower_bound: is the minimum value the attribute can have. - :param upper_bound: is the maximum value the attribute can have. - """ - - client = drac_common.get_drac_client(node) - - try: - return client.list_bios_settings() - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to get the BIOS settings for node ' - '%(node_uuid)s. Reason: %(error)s.', - {'node_uuid': node.uuid, - 'error': exc}) - raise exception.DracOperationError(error=exc) - - -def set_config(task, **kwargs): - """Sets the pending_value parameter for each of the values passed in. - - :param task: a TaskManager instance containing the node to act on. - :param kwargs: a dictionary of {'AttributeName': 'NewValue'} - :raises: DracOperationError on an error from python-dracclient. - :returns: A dictionary containing the 'is_commit_required' key with a - boolean value indicating whether commit_config() needs to be - called to make the changes, and the 'is_reboot_required' key - which has a value of 'true' or 'false'. This key is used to - indicate to the commit_config() call if a reboot should be - performed. - """ - node = task.node - drac_job.validate_job_queue(node) - - client = drac_common.get_drac_client(node) - if 'http_method' in kwargs: - del kwargs['http_method'] - - try: - return client.set_bios_settings(kwargs) - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to set the BIOS settings for node ' - '%(node_uuid)s. Reason: %(error)s.', - {'node_uuid': node.uuid, - 'error': exc}) - raise exception.DracOperationError(error=exc) - - -def commit_config(task, reboot=False): - """Commits pending changes added by set_config - - :param task: a TaskManager instance containing the node to act on. - :param reboot: indicates whether a reboot job should be automatically - created with the config job. - :raises: DracOperationError on an error from python-dracclient. - :returns: the job_id key with the id of the newly created config job. - """ - node = task.node - drac_job.validate_job_queue(node) - - client = drac_common.get_drac_client(node) - - try: - return client.commit_pending_bios_changes(reboot) - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to commit the pending BIOS changes ' - 'for node %(node_uuid)s. Reason: %(error)s.', - {'node_uuid': node.uuid, - 'error': exc}) - raise exception.DracOperationError(error=exc) - - -def abandon_config(task): - """Abandons uncommitted changes added by set_config - - :param task: a TaskManager instance containing the node to act on. - :raises: DracOperationError on an error from python-dracclient. - """ - node = task.node - client = drac_common.get_drac_client(node) - - try: - client.abandon_pending_bios_changes() - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to delete the pending BIOS ' - 'settings for node %(node_uuid)s. 
Reason: %(error)s.', - {'node_uuid': node.uuid, - 'error': exc}) - raise exception.DracOperationError(error=exc) diff --git a/ironic/drivers/modules/drac/common.py b/ironic/drivers/modules/drac/common.py deleted file mode 100644 index be4a3da246..0000000000 --- a/ironic/drivers/modules/drac/common.py +++ /dev/null @@ -1,117 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Common functionalities shared between different DRAC modules. -""" - -from oslo_log import log as logging -from oslo_utils import importutils - -from ironic.common import exception -from ironic.common.i18n import _ -from ironic.common import utils - -drac_client = importutils.try_import('dracclient.client') -drac_constants = importutils.try_import('dracclient.constants') - -LOG = logging.getLogger(__name__) - -REQUIRED_PROPERTIES = { - 'drac_address': _('IP address or hostname of the DRAC card. Required.'), - 'drac_username': _('username used for authentication. Required.'), - 'drac_password': _('password used for authentication. Required.') -} -OPTIONAL_PROPERTIES = { - 'drac_port': _('port used for WS-Man endpoint; default is 443. Optional.'), - 'drac_path': _('path used for WS-Man endpoint; default is "/wsman". ' - 'Optional.'), - 'drac_protocol': _('protocol used for WS-Man endpoint; one of http, https;' - ' default is "https". Optional.'), -} - -COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy() -COMMON_PROPERTIES.update(OPTIONAL_PROPERTIES) - - -def parse_driver_info(node): - """Parse a node's driver_info values. - - Parses the driver_info of the node, reads default values - and returns a dict containing the combination of both. - - :param node: an ironic node object. - :returns: a dict containing information from driver_info - and default values. - :raises: InvalidParameterValue if some mandatory information - is missing on the node or on invalid inputs. 
- """ - driver_info = node.driver_info - parsed_driver_info = {} - - error_msgs = [] - for param in REQUIRED_PROPERTIES: - try: - parsed_driver_info[param] = str(driver_info[param]) - except KeyError: - error_msgs.append(_("'%s' not supplied to DracDriver.") % param) - except UnicodeEncodeError: - error_msgs.append(_("'%s' contains non-ASCII symbol.") % param) - - parsed_driver_info['drac_port'] = driver_info.get('drac_port', 443) - - try: - parsed_driver_info['drac_path'] = str(driver_info.get('drac_path', - '/wsman')) - except UnicodeEncodeError: - error_msgs.append(_("'drac_path' contains non-ASCII symbol.")) - - try: - parsed_driver_info['drac_protocol'] = str( - driver_info.get('drac_protocol', 'https')) - - if parsed_driver_info['drac_protocol'] not in ['http', 'https']: - error_msgs.append(_("'drac_protocol' must be either 'http' or " - "'https'.")) - except UnicodeEncodeError: - error_msgs.append(_("'drac_protocol' contains non-ASCII symbol.")) - - if error_msgs: - msg = (_('The following errors were encountered while parsing ' - 'driver_info:\n%s') % '\n'.join(error_msgs)) - raise exception.InvalidParameterValue(msg) - - port = parsed_driver_info['drac_port'] - parsed_driver_info['drac_port'] = utils.validate_network_port( - port, 'drac_port') - - return parsed_driver_info - - -def get_drac_client(node): - """Returns a DRACClient object from python-dracclient library. - - :param node: an ironic node object. - :returns: a DRACClient object. - :raises: InvalidParameterValue if mandatory information is missing on the - node or on invalid input. - """ - driver_info = parse_driver_info(node) - client = drac_client.DRACClient(driver_info['drac_address'], - driver_info['drac_username'], - driver_info['drac_password'], - driver_info['drac_port'], - driver_info['drac_path'], - driver_info['drac_protocol']) - - return client diff --git a/ironic/drivers/modules/drac/inspect.py b/ironic/drivers/modules/drac/inspect.py index 8dc4c7cd17..a880eeaddd 100644 --- a/ironic/drivers/modules/drac/inspect.py +++ b/ironic/drivers/modules/drac/inspect.py @@ -15,29 +15,12 @@ DRAC inspection interface """ -from ironic_lib import metrics_utils -from oslo_log import log as logging -from oslo_utils import importutils -from oslo_utils import units - from ironic.common import boot_modes -from ironic.common import exception -from ironic.common.i18n import _ -from ironic.common import states -from ironic.common import utils -from ironic.drivers import base -from ironic.drivers.modules.drac import common as drac_common from ironic.drivers.modules.drac import utils as drac_utils from ironic.drivers.modules import inspect_utils from ironic.drivers.modules.redfish import inspect as redfish_inspect from ironic.drivers.modules.redfish import utils as redfish_utils -from ironic import objects -drac_exceptions = importutils.try_import('dracclient.exceptions') - -LOG = logging.getLogger(__name__) - -METRICS = metrics_utils.get_metrics_logger(__name__) _PXE_DEV_ENABLED_INTERFACES = [('PxeDev1EnDis', 'PxeDev1Interface'), ('PxeDev2EnDis', 'PxeDev2Interface'), @@ -63,8 +46,8 @@ class DracRedfishInspect(redfish_inspect.RedfishInspect): """ # Ensure we create a port for every NIC port found for consistency - # with our WSMAN inspect behavior and to work around a bug in some - # versions of the firmware where the port state is not being + # with our previous WSMAN inspect behavior and to work around a bug + # in some versions of the firmware where the port state is not being # reported correctly. 
ethernet_interfaces_mac = list(self._get_mac_address(task).values()) @@ -124,218 +107,3 @@ class DracRedfishInspect(redfish_inspect.RedfishInspect): pxe_port_macs = [mac for mac in pxe_port_macs_list] return pxe_port_macs - - -class DracWSManInspect(base.InspectInterface): - - _GPU_SUPPORTED_LIST = {"TU104GL [Tesla T4]", - "GV100GL [Tesla V100 PCIe 16GB]"} - - def get_properties(self): - """Return the properties of the interface. - - :returns: dictionary of : entries. - """ - return drac_common.COMMON_PROPERTIES - - @METRICS.timer('DracInspect.validate') - def validate(self, task): - """Validate the driver-specific info supplied. - - This method validates whether the 'driver_info' property of the - supplied node contains the required information for this driver to - manage the node. - - :param task: a TaskManager instance containing the node to act on. - :raises: InvalidParameterValue if required driver_info attribute - is missing or invalid on the node. - - """ - return drac_common.parse_driver_info(task.node) - - @METRICS.timer('DracInspect.inspect_hardware') - def inspect_hardware(self, task): - """Inspect hardware. - - Inspect hardware to obtain the essential & additional hardware - properties. - - :param task: a TaskManager instance containing the node to act on. - :raises: HardwareInspectionFailure, if unable to get essential - hardware properties. - :returns: states.MANAGEABLE - """ - - node = task.node - client = drac_common.get_drac_client(node) - properties = {} - - try: - properties['memory_mb'] = sum( - [memory.size_mb for memory in client.list_memory()]) - cpus = client.list_cpus() - if cpus: - properties['cpu_arch'] = 'x86_64' if cpus[0].arch64 else 'x86' - - bios_settings = client.list_bios_settings() - video_controllers = client.list_video_controllers() - current_capabilities = node.properties.get('capabilities', '') - new_capabilities = { - 'boot_mode': bios_settings["BootMode"].current_value.lower(), - 'pci_gpu_devices': self._calculate_gpus(video_controllers)} - - capabilities = utils.get_updated_capabilities(current_capabilities, - new_capabilities) - properties['capabilities'] = capabilities - - virtual_disks = client.list_virtual_disks() - root_disk = self._guess_root_disk(virtual_disks) - if root_disk: - properties['local_gb'] = int(root_disk.size_mb / units.Ki) - else: - physical_disks = client.list_physical_disks() - root_disk = self._guess_root_disk(physical_disks) - if root_disk: - properties['local_gb'] = int( - root_disk.size_mb / units.Ki) - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to introspect node ' - '%(node_uuid)s. Reason: %(error)s.', - {'node_uuid': node.uuid, 'error': exc}) - raise exception.HardwareInspectionFailure(error=exc) - - valid_keys = self.ESSENTIAL_PROPERTIES - missing_keys = valid_keys - set(properties) - if missing_keys: - error = (_('Failed to discover the following properties: ' - '%(missing_keys)s') % - {'missing_keys': ', '.join(missing_keys)}) - raise exception.HardwareInspectionFailure(error=error) - - node.properties = dict(node.properties, **properties) - node.save() - - try: - nics = client.list_nics() - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to introspect node ' - '%(node_uuid)s. 
Reason: %(error)s.', - {'node_uuid': node.uuid, 'error': exc}) - raise exception.HardwareInspectionFailure(error=exc) - - pxe_dev_nics = self._get_pxe_dev_nics(client, nics, node) - if pxe_dev_nics is None: - LOG.warning('No PXE enabled NIC was found for node ' - '%(node_uuid)s.', {'node_uuid': node.uuid}) - - for nic in nics: - try: - port = objects.Port(task.context, address=nic.mac, - node_id=node.id, - pxe_enabled=(nic.id in pxe_dev_nics)) - port.create() - - LOG.info('Port created with MAC address %(mac)s ' - 'for node %(node_uuid)s during inspection', - {'mac': nic.mac, 'node_uuid': node.uuid}) - except exception.MACAlreadyExists: - LOG.warning('Failed to create a port with MAC address ' - '%(mac)s when inspecting the node ' - '%(node_uuid)s because the address is already ' - 'registered', - {'mac': nic.mac, 'node_uuid': node.uuid}) - - LOG.info('Node %s successfully inspected.', node.uuid) - return states.MANAGEABLE - - def _guess_root_disk(self, disks, min_size_required_mb=4 * units.Ki): - """Find a root disk. - - :param disks: list of disks. - :param min_size_required_mb: minimum required size of the root disk in - megabytes. - :returns: root disk. - """ - disks.sort(key=lambda disk: disk.size_mb) - for disk in disks: - if disk.size_mb >= min_size_required_mb: - return disk - - def _calculate_gpus(self, video_controllers): - """Find actual GPU count. - - This method reports number of NVIDIA Tesla T4 GPU devices present - on the server. - - :param video_controllers: list of video controllers. - - :returns: returns total gpu count. - """ - gpu_cnt = 0 - for video_controller in video_controllers: - for gpu in self._GPU_SUPPORTED_LIST: - if video_controller.description == gpu: - gpu_cnt += 1 - return gpu_cnt - - def _get_pxe_dev_nics(self, client, nics, node): - """Get a list of pxe device interfaces. - - :param client: Dracclient to list the bios settings and nics - :param nics: list of nics - - :returns: Returns list of pxe device interfaces. - """ - pxe_dev_nics = [] - pxe_params = ["PxeDev1EnDis", "PxeDev2EnDis", - "PxeDev3EnDis", "PxeDev4EnDis"] - pxe_nics = ["PxeDev1Interface", "PxeDev2Interface", - "PxeDev3Interface", "PxeDev4Interface"] - - try: - bios_settings = client.list_bios_settings() - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to list bios settings ' - 'for %(node_uuid)s. Reason: %(error)s.', - {'node_uuid': node.uuid, 'error': exc}) - raise exception.HardwareInspectionFailure(error=exc) - - if bios_settings["BootMode"].current_value == "Uefi": - for param, nic in zip(pxe_params, pxe_nics): - if param in bios_settings and bios_settings[ - param].current_value == "Enabled": - pxe_dev_nics.append( - bios_settings[nic].current_value) - elif bios_settings["BootMode"].current_value == "Bios": - for nic in nics: - try: - nic_cap = client.list_nic_settings(nic_id=nic.id) - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to list nic settings ' - 'for %(node_uuid)s. Reason: %(error)s.', - {'node_uuid': node.uuid, 'error': exc}) - raise exception.HardwareInspectionFailure(error=exc) - - if ("LegacyBootProto" in nic_cap and nic_cap[ - 'LegacyBootProto'].current_value == "PXE"): - pxe_dev_nics.append(nic.id) - - return pxe_dev_nics - - -class DracInspect(DracWSManInspect): - """Class alias of class DracWSManInspect. - - This class provides ongoing support of the deprecated 'idrac' - inspect interface implementation entrypoint. 
- - All bug fixes and new features should be implemented in its base - class, DracWSManInspect. That makes them available to both the - deprecated 'idrac' and new 'idrac-wsman' entrypoints. Such changes - should not be made to this class. - """ - - def __init__(self): - super(DracInspect, self).__init__() - LOG.warning("Inspect interface 'idrac' is deprecated and may be " - "removed in a future release. Use 'idrac-wsman' instead.") diff --git a/ironic/drivers/modules/drac/job.py b/ironic/drivers/modules/drac/job.py deleted file mode 100644 index 66d89679a9..0000000000 --- a/ironic/drivers/modules/drac/job.py +++ /dev/null @@ -1,116 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -DRAC Lifecycle job specific methods -""" - -from oslo_log import log as logging -from oslo_utils import importutils -import tenacity - -from ironic.common import exception -from ironic.common.i18n import _ -from ironic.conf import CONF -from ironic.drivers.modules.drac import common as drac_common - -drac_exceptions = importutils.try_import('dracclient.exceptions') - -LOG = logging.getLogger(__name__) -WAIT_CLOCK = 5 - - -def validate_job_queue(node, name_prefix=None): - """Validates the job queue on the node. - - It raises an exception if an unfinished configuration job exists. - :param node: an ironic node object. - :param name_prefix: A name prefix for jobs to validate. - :raises: DracOperationError on an error from python-dracclient. - """ - - unfinished_jobs = list_unfinished_jobs(node) - if name_prefix is not None: - # Filter out jobs that don't match the name prefix. - unfinished_jobs = [job for job in unfinished_jobs - if job.name.startswith(name_prefix)] - if not unfinished_jobs: - return - msg = _('Unfinished config jobs found: %(jobs)r. Make sure they are ' - 'completed before retrying.') % {'jobs': unfinished_jobs} - raise exception.DracOperationError(error=msg) - - -def get_job(node, job_id): - """Get the details of a Lifecycle job of the node. - - :param node: an ironic node object. - :param job_id: ID of the Lifecycle job. - :returns: a Job object from dracclient. - :raises: DracOperationError on an error from python-dracclient. - """ - client = drac_common.get_drac_client(node) - - try: - return client.get_job(job_id) - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to get the job %(job_id)s ' - 'for node %(node_uuid)s. Reason: %(error)s.', - {'job_id': job_id, - 'node_uuid': node.uuid, - 'error': exc}) - raise exception.DracOperationError(error=exc) - - -def list_unfinished_jobs(node): - """List unfinished config jobs of the node. - - :param node: an ironic node object. - :returns: a list of Job objects from dracclient. - :raises: DracOperationError on an error from python-dracclient. - """ - client = drac_common.get_drac_client(node) - - try: - return client.list_jobs(only_unfinished=True) - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to get the list of unfinished jobs ' - 'for node %(node_uuid)s. 
Reason: %(error)s.', - {'node_uuid': node.uuid, - 'error': exc}) - raise exception.DracOperationError(error=exc) - - -@tenacity.retry( - retry=tenacity.retry_if_exception_type(exception.DracOperationError), - stop=tenacity.stop_after_attempt(CONF.drac.config_job_max_retries), - wait=tenacity.wait_fixed(WAIT_CLOCK), - reraise=True) -def wait_for_job_completion(node, - retries=CONF.drac.config_job_max_retries): - """Wait for job to complete - - It will wait for the job to complete for 20 minutes and raises timeout - if job never complete within given interval of time. - :param node: an ironic node object. - :param retries: no of retries to make conductor wait. - :raises: DracOperationError on exception raised from python-dracclient - or a timeout while waiting for job completion. - """ - if not list_unfinished_jobs(node): - return - err_msg = _( - 'There are unfinished jobs in the job ' - 'queue on node %(node_uuid)s.') % {'node_uuid': node.uuid} - LOG.warning(err_msg) - raise exception.DracOperationError(error=err_msg) diff --git a/ironic/drivers/modules/drac/management.py b/ironic/drivers/modules/drac/management.py index 4612ffbd94..f7ee661377 100644 --- a/ironic/drivers/modules/drac/management.py +++ b/ironic/drivers/modules/drac/management.py @@ -21,13 +21,11 @@ DRAC management interface """ import json -import time from ironic_lib import metrics_utils import jsonschema from jsonschema import exceptions as json_schema_exc from oslo_log import log as logging -from oslo_utils import importutils import sushy from ironic.common import boot_devices @@ -36,20 +34,15 @@ from ironic.common.i18n import _ from ironic.common import molds from ironic.common import states from ironic.conductor import periodics -from ironic.conductor import task_manager from ironic.conductor import utils as manager_utils from ironic.conf import CONF from ironic.drivers import base from ironic.drivers.modules import deploy_utils -from ironic.drivers.modules.drac import common as drac_common -from ironic.drivers.modules.drac import job as drac_job from ironic.drivers.modules.drac import utils as drac_utils from ironic.drivers.modules.redfish import management as redfish_management from ironic.drivers.modules.redfish import utils as redfish_utils -drac_exceptions = importutils.try_import('dracclient.exceptions') - LOG = logging.getLogger(__name__) METRICS = metrics_utils.get_metrics_logger(__name__) @@ -113,73 +106,6 @@ _CONF_MOLD_SCHEMA = { } -def _get_boot_device(node, drac_boot_devices=None): - client = drac_common.get_drac_client(node) - - try: - boot_modes = client.list_boot_modes() - next_boot_modes = [mode.id for mode in boot_modes if mode.is_next] - if _NON_PERSISTENT_BOOT_MODE in next_boot_modes: - next_boot_mode = _NON_PERSISTENT_BOOT_MODE - else: - next_boot_mode = next_boot_modes[0] - - if drac_boot_devices is None: - drac_boot_devices = client.list_boot_devices() - - # It is possible for there to be no boot device. - boot_device = None - - if next_boot_mode in drac_boot_devices: - drac_boot_device = drac_boot_devices[next_boot_mode][0] - - for key, value in _BOOT_DEVICES_MAP.items(): - for id_component in value: - if id_component in drac_boot_device.id: - boot_device = key - break - - if boot_device: - break - - return {'boot_device': boot_device, - 'persistent': next_boot_mode != _NON_PERSISTENT_BOOT_MODE} - except (drac_exceptions.BaseClientException, IndexError) as exc: - LOG.error('DRAC driver failed to get next boot mode for ' - 'node %(node_uuid)s. 
Reason: %(error)s.', - {'node_uuid': node.uuid, 'error': exc}) - raise exception.DracOperationError(error=exc) - - -def _get_next_persistent_boot_mode(node): - client = drac_common.get_drac_client(node) - - try: - boot_modes = client.list_boot_modes() - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to get next persistent boot mode for ' - 'node %(node_uuid)s. Reason: %(error)s', - {'node_uuid': node.uuid, 'error': exc}) - raise exception.DracOperationError(error=exc) - - next_persistent_boot_mode = None - for mode in boot_modes: - if mode.is_next and mode.id != _NON_PERSISTENT_BOOT_MODE: - next_persistent_boot_mode = mode.id - break - - if not next_persistent_boot_mode: - message = _('List of boot modes, %(list_boot_modes)s, does not ' - 'contain a persistent mode') % { - 'list_boot_modes': boot_modes} - LOG.error('DRAC driver failed to get next persistent boot mode for ' - 'node %(node_uuid)s. Reason: %(message)s', - {'node_uuid': node.uuid, 'message': message}) - raise exception.DracOperationError(error=message) - - return next_persistent_boot_mode - - def _is_boot_order_flexibly_programmable(persistent, bios_settings): return persistent and 'SetBootOrderFqdd1' in bios_settings @@ -218,129 +144,6 @@ def _validate_conf_mold(data): _("Invalid configuration mold: %(error)s") % {'error': e}) -def set_boot_device(node, device, persistent=False): - """Set the boot device for a node. - - Set the boot device to use on next boot of the node. - - :param node: an ironic node object. - :param device: the boot device, one of - :mod:`ironic.common.boot_devices`. - :param persistent: Boolean value. True if the boot device will - persist to all future boots, False if not. - Default: False. - :raises: DracOperationError on an error from python-dracclient. - """ - client = drac_common.get_drac_client(node) - - # If pending BIOS job or pending non-BIOS job found in job queue, - # we need to clear that jobs before executing clear_job_queue or - # known_good_state clean step of management interface. - # Otherwise, pending BIOS config job can cause creating new config jobs - # to fail and pending non-BIOS job can execute on reboot the node. - validate_job_queue = True - if node.driver_internal_info.get("clean_steps"): - if node.driver_internal_info.get("clean_steps")[0].get( - 'step') in _CLEAR_JOBS_CLEAN_STEPS: - unfinished_jobs = drac_job.list_unfinished_jobs(node) - if unfinished_jobs: - validate_job_queue = False - client.delete_jobs(job_ids=[job.id for job in unfinished_jobs]) - - if validate_job_queue: - drac_job.validate_job_queue(node, name_prefix="Configure: BIOS") - - try: - drac_boot_devices = client.list_boot_devices() - - current_boot_device = _get_boot_device(node, drac_boot_devices) - # If we are already booting from the right device, do nothing. 
- if current_boot_device == {'boot_device': device, - 'persistent': persistent}: - LOG.debug('DRAC already set to boot from %s', device) - return - - persistent_boot_mode = _get_next_persistent_boot_mode(node) - - drac_boot_device = None - for drac_device in drac_boot_devices[persistent_boot_mode]: - for id_component in _BOOT_DEVICES_MAP[device]: - if id_component in drac_device.id: - drac_boot_device = drac_device.id - break - - if drac_boot_device: - break - - if drac_boot_device: - if persistent: - boot_list = persistent_boot_mode - else: - boot_list = _NON_PERSISTENT_BOOT_MODE - - client.change_boot_device_order(boot_list, drac_boot_device) - else: - # No DRAC boot device of the type requested by the argument - # 'device' is present. This is normal for UEFI boot mode, - # following deployment's writing of the operating system to - # disk. It can also occur when a server has not been - # powered on after a new boot device has been installed. - # - # If the boot order is flexibly programmable, use that to - # attempt to detect and boot from a device of the requested - # type during the next boot. That avoids the need for an - # extra reboot. Otherwise, this function cannot satisfy the - # request, because it was called with an invalid device. - bios_settings = client.list_bios_settings(by_name=True) - if _is_boot_order_flexibly_programmable(persistent, bios_settings): - drac_boot_mode = bios_settings['BootMode'].current_value - if drac_boot_mode not in _DRAC_BOOT_MODES: - message = _("DRAC reported unknown boot mode " - "'%(drac_boot_mode)s'") % { - 'drac_boot_mode': drac_boot_mode} - LOG.error('DRAC driver failed to change boot device order ' - 'for node %(node_uuid)s. Reason: %(message)s.', - {'node_uuid': node.uuid, 'message': message}) - raise exception.DracOperationError(error=message) - - flexibly_program_settings = _flexibly_program_boot_order( - device, drac_boot_mode) - client.set_bios_settings(flexibly_program_settings) - else: - raise exception.InvalidParameterValue( - _("set_boot_device called with invalid device " - "'%(device)s' for node %(node_id)s.") % - {'device': device, 'node_id': node.uuid}) - - job_id = client.commit_pending_bios_changes() - job_entry = client.get_job(job_id) - - timeout = CONF.drac.boot_device_job_status_timeout - end_time = time.time() + timeout - - LOG.debug('Waiting for BIOS configuration job %(job_id)s ' - 'to be scheduled for node %(node)s', - {'job_id': job_id, - 'node': node.uuid}) - - while job_entry.status != "Scheduled": - if time.time() >= end_time: - raise exception.DracOperationError( - error=_( - 'Timed out waiting BIOS configuration for job ' - '%(job)s to reach Scheduled state. Job is still ' - 'in %(status)s state.') % - {'job': job_id, 'status': job_entry.status}) - time.sleep(3) - job_entry = client.get_job(job_id) - - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to change boot device order for ' - 'node %(node_uuid)s. Reason: %(error)s.', - {'node_uuid': node.uuid, 'error': exc}) - raise exception.DracOperationError(error=exc) - - class DracRedfishManagement(redfish_management.RedfishManagement): """iDRAC Redfish interface for management-related actions.""" @@ -633,10 +436,7 @@ class DracRedfishManagement(redfish_management.RedfishManagement): LOG.warning('iDRAC on node %(node)s does not support ' 'clearing Lifecycle Controller job queue ' 'using the idrac-redfish driver. ' - 'If using iDRAC9, consider upgrading firmware. 
' - 'If using iDRAC8, consider switching to ' - 'idrac-wsman for management interface if ' - 'possible.', + 'If using iDRAC9, consider upgrading firmware.', {'node': task.node.uuid}) if task.node.provision_state != states.VERIFYING: raise @@ -661,10 +461,7 @@ class DracRedfishManagement(redfish_management.RedfishManagement): if "Oem/Dell/DelliDRACCardService is missing" in str(exc): LOG.warning('iDRAC on node %(node)s does not support ' 'iDRAC reset using the idrac-redfish driver. ' - 'If using iDRAC9, consider upgrading firmware. ' - 'If using iDRAC8, consider switching to ' - 'idrac-wsman for management interface if ' - 'possible.', + 'If using iDRAC9, consider upgrading firmware. ', {'node': task.node.uuid}) if task.node.provision_state != states.VERIFYING: raise @@ -686,181 +483,3 @@ class DracRedfishManagement(redfish_management.RedfishManagement): self.clear_job_queue(task) LOG.info('Reset iDRAC to known good state for node %(node)s', {'node': task.node.uuid}) - - -class DracWSManManagement(base.ManagementInterface): - - # NOTE(TheJulia): Deprecating November 2023 in favor of Redfish - # and due to a lack of active driver maintenance. - supported = False - - def get_properties(self): - """Return the properties of the interface.""" - return drac_common.COMMON_PROPERTIES - - @METRICS.timer('DracManagement.validate') - def validate(self, task): - """Validate the driver-specific info supplied. - - This method validates whether the 'driver_info' property of the - supplied node contains the required information for this driver to - manage the node. - - :param task: a TaskManager instance containing the node to act on. - :raises: InvalidParameterValue if required driver_info attribute - is missing or invalid on the node. - - """ - return drac_common.parse_driver_info(task.node) - - @METRICS.timer('DracManagement.get_supported_boot_devices') - def get_supported_boot_devices(self, task): - """Get a list of the supported boot devices. - - :param task: a TaskManager instance containing the node to act on. - :returns: A list with the supported boot devices defined - in :mod:`ironic.common.boot_devices`. - - """ - return list(_BOOT_DEVICES_MAP) - - @METRICS.timer('DracManagement.get_boot_device') - def get_boot_device(self, task): - """Get the current boot device for a node. - - Returns the current boot device of the node. - - :param task: a TaskManager instance containing the node to act on. - :raises: DracOperationError on an error from python-dracclient. - :returns: a dictionary containing: - - :boot_device: the boot device, one of - :mod:`ironic.common.boot_devices` or None if it is unknown. - :persistent: whether the boot device will persist to all future - boots or not, None if it is unknown. - """ - node = task.node - - boot_device = node.driver_internal_info.get('drac_boot_device') - if boot_device is not None: - return boot_device - - return _get_boot_device(node) - - @METRICS.timer('DracManagement.set_boot_device') - @task_manager.require_exclusive_lock - def set_boot_device(self, task, device, persistent=False): - """Set the boot device for a node. - - Set the boot device to use on next reboot of the node. - - :param task: a TaskManager instance containing the node to act on. - :param device: the boot device, one of - :mod:`ironic.common.boot_devices`. - :param persistent: Boolean value. True if the boot device will - persist to all future boots, False if not. - Default: False. - :raises: InvalidParameterValue if an invalid boot device is specified. 
- """ - node = task.node - - if device not in _BOOT_DEVICES_MAP: - raise exception.InvalidParameterValue( - _("set_boot_device called with invalid device '%(device)s' " - "for node %(node_id)s.") % {'device': device, - 'node_id': node.uuid}) - - # NOTE(ifarkas): DRAC interface doesn't allow changing the boot device - # multiple times in a row without a reboot. This is - # because a change need to be committed via a - # configuration job, and further configuration jobs - # cannot be created until the previous one is processed - # at the next boot. As a workaround, saving it to - # driver_internal_info and committing the change during - # power state change. - node.set_driver_internal_info('drac_boot_device', - {'boot_device': device, - 'persistent': persistent}) - node.save() - - @METRICS.timer('DracManagement.get_sensors_data') - def get_sensors_data(self, task): - """Get sensors data. - - :param task: a TaskManager instance. - :raises: FailedToGetSensorData when getting the sensor data fails. - :raises: FailedToParseSensorData when parsing sensor data fails. - :returns: returns a consistent format dict of sensor data grouped by - sensor type, which can be processed by Ceilometer. - """ - raise NotImplementedError() - - @METRICS.timer('DracManagement.reset_idrac') - @base.verify_step(priority=0) - @base.clean_step(priority=0, requires_ramdisk=False) - def reset_idrac(self, task): - """Reset the iDRAC. - - :param task: a TaskManager instance containing the node to act on. - :returns: None if it is completed. - :raises: DracOperationError on an error from python-dracclient. - """ - node = task.node - - client = drac_common.get_drac_client(node) - client.reset_idrac(force=True, wait=True) - - @METRICS.timer('DracManagement.known_good_state') - @base.verify_step(priority=0) - @base.clean_step(priority=0, requires_ramdisk=False) - def known_good_state(self, task): - """Reset the iDRAC, Clear the job queue. - - :param task: a TaskManager instance containing the node to act on. - :returns: None if it is completed. - :raises: DracOperationError on an error from python-dracclient. - """ - node = task.node - - client = drac_common.get_drac_client(node) - client.reset_idrac(force=True, wait=True) - client.delete_jobs(job_ids=[_CLEAR_JOB_IDS]) - - @METRICS.timer('DracManagement.clear_job_queue') - @base.verify_step(priority=0) - @base.clean_step(priority=0, requires_ramdisk=False) - def clear_job_queue(self, task): - """Clear the job queue. - - :param task: a TaskManager instance containing the node to act on. - :returns: None if it is completed. - :raises: DracOperationError on an error from python-dracclient. - """ - try: - node = task.node - - client = drac_common.get_drac_client(node) - client.delete_jobs(job_ids=[_CLEAR_JOB_IDS]) - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to clear the job queue for node ' - '%(node_uuid)s. Reason: %(error)s.', - {'node_uuid': node.uuid, 'error': exc}) - raise exception.DracOperationError(error=exc) - - -class DracManagement(DracWSManManagement): - """Class alias of class DracWSManManagement. - - This class provides ongoing support of the deprecated 'idrac' - management interface implementation entrypoint. - - All bug fixes and new features should be implemented in its base - class, DracWSManManagement. That makes them available to both the - deprecated 'idrac' and new 'idrac-wsman' entrypoints. Such changes - should not be made to this class. 
- """ - - def __init__(self): - super(DracManagement, self).__init__() - LOG.warning("Management interface 'idrac' is deprecated and may be " - "removed in a future release. Use 'idrac-wsman' instead.") diff --git a/ironic/drivers/modules/drac/power.py b/ironic/drivers/modules/drac/power.py index 33bfbc1969..bd2df80a1a 100644 --- a/ironic/drivers/modules/drac/power.py +++ b/ironic/drivers/modules/drac/power.py @@ -15,168 +15,9 @@ DRAC power interface """ -import time -from ironic_lib import metrics_utils -from oslo_log import log as logging -from oslo_utils import importutils - -from ironic.common import exception -from ironic.common import states -from ironic.conductor import task_manager -from ironic.conductor import utils as cond_utils -from ironic.drivers import base -from ironic.drivers.modules.drac import common as drac_common -from ironic.drivers.modules.drac import management as drac_management from ironic.drivers.modules.redfish import power as redfish_power -drac_constants = importutils.try_import('dracclient.constants') -drac_exceptions = importutils.try_import('dracclient.exceptions') - -LOG = logging.getLogger(__name__) - -METRICS = metrics_utils.get_metrics_logger(__name__) - -if drac_constants: - POWER_STATES = { - drac_constants.POWER_ON: states.POWER_ON, - drac_constants.POWER_OFF: states.POWER_OFF, - drac_constants.REBOOT: states.REBOOT - } - - REVERSE_POWER_STATES = dict((v, k) for (k, v) in POWER_STATES.items()) - -POWER_STATE_TRIES = 15 -POWER_STATE_SLEEP = 2 -POWER_STATE_CHANGE_FAIL = 'The command failed to set RequestedState' - - -def _get_power_state(node): - """Returns the current power state of the node. - - :param node: an ironic node object. - :returns: the power state, one of :mod:`ironic.common.states`. - :raises: InvalidParameterValue if required DRAC credentials are missing. - :raises: DracOperationError on an error from python-dracclient - """ - - client = drac_common.get_drac_client(node) - - try: - drac_power_state = client.get_power_state() - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to get power state for node ' - '%(node_uuid)s. Reason: %(error)s.', - {'node_uuid': node.uuid, 'error': exc}) - raise exception.DracOperationError(error=exc) - - return POWER_STATES[drac_power_state] - - -def _commit_boot_list_change(node): - - boot_device = node.driver_internal_info.get('drac_boot_device') - if boot_device is None: - return - - drac_management.set_boot_device(node, boot_device['boot_device'], - boot_device['persistent']) - - node.set_driver_internal_info('drac_boot_device', None) - node.save() - - -def _set_power_state(task, power_state, timeout=None): - """Turns the server power on/off or do a reboot. - - :param task: a TaskManager instance containing the node to act on. - :param power_state: a power state from :mod:`ironic.common.states`. - :param timeout: Time to wait for the node to reach the requested state. - When requested state is reboot, not used as not waiting then. - :raises: InvalidParameterValue if required DRAC credentials are missing. - :raises: DracOperationError on an error from python-dracclient - """ - node = task.node - # NOTE(ifarkas): DRAC interface doesn't allow changing the boot device - # multiple times in a row without a reboot. This is - # because a change need to be committed via a - # configuration job, and further configuration jobs - # cannot be created until the previous one is processed - # at the next boot. 
As a workaround, it is saved to - # driver_internal_info during set_boot_device and committing - # it here. - _commit_boot_list_change(node) - - client = drac_common.get_drac_client(node) - tries = POWER_STATE_TRIES - - # Cases have been seen where the iDRAC returns a SYS021 error even when - # the server is in the right power state and a valid power state change - # is attempted. Retry in this case. - while tries > 0: - # The iDRAC will return a SYS021 error if the server is powered off - # and a reboot is requested. In this situation, convert the requested - # reboot into a power on to avoid this error. To minimize the chance - # of a race condition, it is critical to do this check immediately - # before sending the power state change command. This keeps the - # window during which the server could change power states without us - # knowing about it as small as possible. - calc_power_state = power_state - if power_state == states.REBOOT: - current_power_state = _get_power_state(node) - # If the server is not on, then power it on instead of rebooting - if current_power_state != states.POWER_ON: - calc_power_state = states.POWER_ON - - target_power_state = REVERSE_POWER_STATES[calc_power_state] - - try: - client.set_power_state(target_power_state) - if calc_power_state == states.REBOOT: - # TODO(rloo): Support timeouts! - if timeout is not None: - LOG.warning("The 'idrac-wsman' Power Interface does not " - "support 'timeout' parameter when setting " - "power state to reboot. Ignoring " - "timeout=%(timeout)s", - {'timeout': timeout}) - else: - # Skipped for reboot as can't match reboot with on/off. - # Reboot so far has been part of workflow that is not followed - # by another power state change that could break the flow. - cond_utils.node_wait_for_power_state( - task, calc_power_state, timeout) - break - except drac_exceptions.BaseClientException as exc: - if (power_state == states.REBOOT - and POWER_STATE_CHANGE_FAIL in str(exc) - and tries > 0): - LOG.warning('DRAC driver failed to set power state for node ' - '%(node_uuid)s to %(calc_power_state)s. ' - 'Reason: %(error)s. Retrying...', - {'node_uuid': node.uuid, - 'calc_power_state': calc_power_state, - 'error': exc}) - tries -= 1 - time.sleep(POWER_STATE_SLEEP) - else: - LOG.error('DRAC driver failed to set power state for node ' - '%(node_uuid)s to %(calc_power_state)s. ' - 'Reason: %(error)s.', - {'node_uuid': node.uuid, - 'calc_power_state': calc_power_state, - 'error': exc}) - raise exception.DracOperationError(error=exc) - - if tries <= 0: - error_msg = (_('DRAC driver timed out while trying to set the power ' - 'state for node %(node_uuid)s to ' - '%(calc_power_state)s.') % - {'node_uuid': node.uuid, - 'calc_power_state': calc_power_state}) - LOG.error(error_msg) - raise exception.DracOperationError(error_msg) - class DracRedfishPower(redfish_power.RedfishPower): """iDRAC Redfish interface for power-related actions. @@ -187,87 +28,3 @@ class DracRedfishPower(redfish_power.RedfishPower): should be implemented by this class. """ pass - - -class DracWSManPower(base.PowerInterface): - """Interface for power-related actions.""" - - # NOTE(TheJulia): Deprecating November 2023 in favor of Redfish - # and due to a lack of active driver maintenance. - supported = False - - def get_properties(self): - """Return the properties of the interface.""" - return drac_common.COMMON_PROPERTIES - - @METRICS.timer('DracPower.validate') - def validate(self, task): - """Validate the driver-specific Node power info. 
- - This method validates whether the 'driver_info' property of the - supplied node contains the required information for this driver to - manage the power state of the node. - - :param task: a TaskManager instance containing the node to act on. - :raises: InvalidParameterValue if required driver_info attribute - is missing or invalid on the node. - """ - return drac_common.parse_driver_info(task.node) - - @METRICS.timer('DracPower.get_power_state') - def get_power_state(self, task): - """Return the power state of the node. - - :param task: a TaskManager instance containing the node to act on. - :returns: the power state, one of :mod:`ironic.common.states`. - :raises: InvalidParameterValue if required DRAC credentials are - missing. - :raises: DracOperationError on an error from python-dracclient. - """ - return _get_power_state(task.node) - - @METRICS.timer('DracPower.set_power_state') - @task_manager.require_exclusive_lock - def set_power_state(self, task, power_state, timeout=None): - """Set the power state of the node. - - :param task: a TaskManager instance containing the node to act on. - :param power_state: a power state from :mod:`ironic.common.states`. - :param timeout: Time to wait for the node to reach the requested state. - When requested state is reboot, not used as not waiting then. - :raises: InvalidParameterValue if required DRAC credentials are - missing. - :raises: DracOperationError on an error from python-dracclient. - """ - _set_power_state(task, power_state, timeout) - - @METRICS.timer('DracPower.reboot') - @task_manager.require_exclusive_lock - def reboot(self, task, timeout=None): - """Perform a reboot of the task's node. - - :param task: a TaskManager instance containing the node to act on. - :param timeout: timeout (in seconds). Unsupported by this interface. - :raises: InvalidParameterValue if required DRAC credentials are - missing. - :raises: DracOperationError on an error from python-dracclient. - """ - _set_power_state(task, states.REBOOT, timeout) - - -class DracPower(DracWSManPower): - """Class alias of class DracWSManPower. - - This class provides ongoing support of the deprecated 'idrac' power - interface implementation entrypoint. - - All bug fixes and new features should be implemented in its base - class, DracWSManPower. That makes them available to both the - deprecated 'idrac' and new 'idrac-wsman' entrypoints. Such changes - should not be made to this class. - """ - - def __init__(self): - super(DracPower, self).__init__() - LOG.warning("Power interface 'idrac' is deprecated and may be removed " - "in a future release. 
Use 'idrac-wsman' instead.") diff --git a/ironic/drivers/modules/drac/raid.py b/ironic/drivers/modules/drac/raid.py index 6a3a06baf6..de22493c29 100644 --- a/ironic/drivers/modules/drac/raid.py +++ b/ironic/drivers/modules/drac/raid.py @@ -15,1150 +15,30 @@ DRAC RAID specific methods """ -from collections import defaultdict -import math - from ironic_lib import metrics_utils from oslo_log import log as logging from oslo_utils import importutils -from oslo_utils import units import sushy import tenacity from ironic.common import exception from ironic.common.i18n import _ -from ironic.common import raid as raid_common from ironic.common import states from ironic.conductor import periodics from ironic.conductor import utils as manager_utils from ironic.conf import CONF from ironic.drivers import base from ironic.drivers.modules import deploy_utils -from ironic.drivers.modules.drac import common as drac_common -from ironic.drivers.modules.drac import job as drac_job from ironic.drivers.modules.drac import utils as drac_utils from ironic.drivers.modules.redfish import raid as redfish_raid from ironic.drivers.modules.redfish import utils as redfish_utils -drac_exceptions = importutils.try_import('dracclient.exceptions') -drac_constants = importutils.try_import('dracclient.constants') sushy_oem_idrac = importutils.try_import('sushy_oem_idrac') LOG = logging.getLogger(__name__) METRICS = metrics_utils.get_metrics_logger(__name__) -_CURRENT_RAID_CONTROLLER_MODE = "RAIDCurrentControllerMode" -_REQUESTED_RAID_CONTROLLER_MODE = "RAIDRequestedControllerMode" -_EHBA_MODE = "Enhanced HBA" -_RAID_MODE = "RAID" - -RAID_LEVELS = { - '0': { - 'min_disks': 1, - 'max_disks': 1000, - 'type': 'simple', - 'overhead': 0 - }, - '1': { - 'min_disks': 2, - 'max_disks': 2, - 'type': 'simple', - 'overhead': 1 - }, - '5': { - 'min_disks': 3, - 'max_disks': 1000, - 'type': 'simple', - 'overhead': 1 - }, - '6': { - 'min_disks': 4, - 'max_disks': 1000, - 'type': 'simple', - 'overhead': 2 - }, - '1+0': { - 'type': 'spanned', - 'span_type': '1' - }, - '5+0': { - 'type': 'spanned', - 'span_type': '5' - }, - '6+0': { - 'type': 'spanned', - 'span_type': '6' - } -} - - -def list_raid_controllers(node): - """List the RAID controllers of the node. - - :param node: an ironic node object. - :returns: a list of RAIDController objects from dracclient. - :raises: DracOperationError on an error from python-dracclient. - """ - client = drac_common.get_drac_client(node) - - try: - return client.list_raid_controllers() - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to get the list of RAID controllers ' - 'for node %(node_uuid)s. Reason: %(error)s.', - {'node_uuid': node.uuid, 'error': exc}) - raise exception.DracOperationError(error=exc) - - -def list_virtual_disks(node): - """List the virtual disks of the node. - - :param node: an ironic node object. - :returns: a list of VirtualDisk objects from dracclient. - :raises: DracOperationError on an error from python-dracclient. - """ - client = drac_common.get_drac_client(node) - - try: - return client.list_virtual_disks() - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to get the list of virtual disks ' - 'for node %(node_uuid)s. Reason: %(error)s.', - {'node_uuid': node.uuid, 'error': exc}) - raise exception.DracOperationError(error=exc) - - -def list_physical_disks(node): - """List the physical disks of the node. - - :param node: an ironic node object. - :returns: a list of PhysicalDisk objects from dracclient. 
- :raises: DracOperationError on an error from python-dracclient. - """ - client = drac_common.get_drac_client(node) - - try: - return client.list_physical_disks() - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to get the list of physical disks ' - 'for node %(node_uuid)s. Reason: %(error)s.', - {'node_uuid': node.uuid, 'error': exc}) - raise exception.DracOperationError(error=exc) - - -def _is_raid_controller(node, raid_controller_fqdd, raid_controllers=None): - """Find out if object's fqdd is for a raid controller or not - - :param node: an ironic node object - :param raid_controller_fqdd: The object's fqdd we are testing to see - if it is a raid controller or not. - :param raid_controllers: A list of RAIDControllers used to check for - the presence of BOSS cards. If None, the - iDRAC will be queried for the list of - controllers. - :returns: boolean, True if the device is a RAID controller, - False if not. - """ - client = drac_common.get_drac_client(node) - - try: - return client.is_raid_controller(raid_controller_fqdd, - raid_controllers) - except drac_exceptions.BaseClientException as exc: - LOG.error('Unable to determine if controller %(raid_controller_fqdd)s ' - 'on node %(node_uuid)s is a RAID controller. ' - 'Reason: %(error)s. ', - {'raid_controller_fqdd': raid_controller_fqdd, - 'node_uuid': node.uuid, 'error': exc}) - - raise exception.DracOperationError(error=exc) - - -def _validate_job_queue(node, raid_controller=None): - """Validate that there are no pending jobs for this controller. - - :param node: an ironic node object. - :param raid_controller: id of the RAID controller. - """ - kwargs = {} - if raid_controller: - kwargs["name_prefix"] = "Config:RAID:%s" % raid_controller - drac_job.validate_job_queue(node, **kwargs) - - -def create_virtual_disk(node, raid_controller, physical_disks, raid_level, - size_mb, disk_name=None, span_length=None, - span_depth=None): - """Create a single virtual disk on a RAID controller. - - The created virtual disk will be in pending state. The DRAC card will do - the actual configuration once the changes are applied by calling the - ``commit_config`` method. - - :param node: an ironic node object. - :param raid_controller: id of the RAID controller. - :param physical_disks: ids of the physical disks. - :param raid_level: RAID level of the virtual disk. - :param size_mb: size of the virtual disk. - :param disk_name: name of the virtual disk. (optional) - :param span_depth: Number of spans in virtual disk. (optional) - :param span_length: Number of disks per span. (optional) - :returns: a dictionary containing the commit_needed key with a boolean - value indicating whether a config job must be created for the - values to be applied. - :raises: DracOperationError on an error from python-dracclient. - """ - # This causes config to fail, because the boot mode is set via a config - # job. - _validate_job_queue(node, raid_controller) - - client = drac_common.get_drac_client(node) - - try: - return client.create_virtual_disk(raid_controller, physical_disks, - raid_level, size_mb, disk_name, - span_length, span_depth) - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to create virtual disk for node ' - '%(node_uuid)s. Reason: %(error)s.', - {'node_uuid': node.uuid, - 'error': exc}) - raise exception.DracOperationError(error=exc) - - -def delete_virtual_disk(node, virtual_disk): - """Delete a single virtual disk on a RAID controller. 
- - The deleted virtual disk will be in pending state. The DRAC card will do - the actual configuration once the changes are applied by calling the - ``commit_config`` method. - - :param node: an ironic node object. - :param virtual_disk: id of the virtual disk. - :returns: a dictionary containing the commit_needed key with a boolean - value indicating whether a config job must be created for the - values to be applied. - :raises: DracOperationError on an error from python-dracclient. - """ - # NOTE(mgoddard): Cannot specify raid_controller as we don't know it. - _validate_job_queue(node) - - client = drac_common.get_drac_client(node) - - try: - return client.delete_virtual_disk(virtual_disk) - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to delete virtual disk ' - '%(virtual_disk_fqdd)s for node %(node_uuid)s. ' - 'Reason: %(error)s.', - {'virtual_disk_fqdd': virtual_disk, - 'node_uuid': node.uuid, - 'error': exc}) - raise exception.DracOperationError(error=exc) - - -def _reset_raid_config(node, raid_controller): - """Delete all virtual disk and unassign all hotspares physical disk - - :param node: an ironic node object. - :param raid_controller: id of the RAID controller. - :returns: a dictionary containing - - The is_commit_required needed key with a - boolean value indicating whether a config job must be created - for the values to be applied. - - The is_reboot_required key with a RebootRequired enumerated - value indicating whether the server must be rebooted to - reset configuration. - :raises: DracOperationError on an error from python-dracclient. - """ - try: - - _validate_job_queue(node, raid_controller) - - client = drac_common.get_drac_client(node) - return client.reset_raid_config(raid_controller) - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to delete all virtual disk ' - 'and unassign all hotspares ' - 'on %(raid_controller_fqdd)s ' - 'for node %(node_uuid)s. ' - 'Reason: %(error)s.', - {'raid_controller_fqdd': raid_controller, - 'node_uuid': node.uuid, - 'error': exc}) - raise exception.DracOperationError(error=exc) - - -def clear_foreign_config(node, raid_controller): - """Free up the foreign drives. - - :param node: an ironic node object. - :param raid_controller: id of the RAID controller. - :returns: a dictionary containing - - The is_commit_required needed key with a - boolean value indicating whether a config job must be created - for the values to be applied. - - The is_reboot_required key with a RebootRequired enumerated - value indicating whether the server must be rebooted to - clear foreign configuration. - :raises: DracOperationError on an error from python-dracclient. - """ - try: - - _validate_job_queue(node, raid_controller) - - client = drac_common.get_drac_client(node) - return client.clear_foreign_config(raid_controller) - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to free foreign driver ' - 'on %(raid_controller_fqdd)s ' - 'for node %(node_uuid)s. ' - 'Reason: %(error)s.', - {'raid_controller_fqdd': raid_controller, - 'node_uuid': node.uuid, - 'error': exc}) - raise exception.DracOperationError(error=exc) - - -def set_raid_settings(node, controller_fqdd, settings): - """Sets the RAID configuration - - It sets the pending_value parameter for each of the attributes - passed in. For the values to be applied, a config job must - be created. - - :param node: an ironic node object. - :param controller_fqdd: the ID of the RAID controller. 
- :param settings: a dictionary containing the proposed values, with - each key being the name of attribute and the value - being the proposed value. - :returns: a dictionary containing: - - The is_commit_required key with a boolean value indicating - whether a config job must be created for the values to be - applied. - - The is_reboot_required key with a RebootRequired enumerated - value indicating whether the server must be rebooted for the - values to be applied. Possible values are true and false. - :raises: DRACOperationFailed on error reported back by the DRAC - interface - """ - try: - - drac_job.validate_job_queue(node) - - client = drac_common.get_drac_client(node) - return client.set_raid_settings(controller_fqdd, settings) - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to set raid settings ' - 'on %(raid_controller_fqdd)s ' - 'for node %(node_uuid)s. ' - 'Reason: %(error)s.', - {'raid_controller_fqdd': controller_fqdd, - 'node_uuid': node.uuid, - 'error': exc}) - raise exception.DracOperationError(error=exc) - - -def list_raid_settings(node): - """List the RAID configuration settings - - :param node: an ironic node object. - :returns: a dictionary with the RAID settings using InstanceID as the - key. The attributes are RAIDEnumerableAttribute, - RAIDStringAttribute and RAIDIntegerAttribute objects. - :raises: DRACOperationFailed on error reported back by the DRAC - interface - """ - try: - - drac_job.validate_job_queue(node) - - client = drac_common.get_drac_client(node) - return client.list_raid_settings() - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to list raid settings ' - 'for node %(node_uuid)s. ' - 'Reason: %(error)s.', - {'node_uuid': node.uuid, - 'error': exc}) - raise exception.DracOperationError(error=exc) - - -def change_physical_disk_state(node, mode=None, - controllers_to_physical_disk_ids=None): - """Convert disks RAID status - - This method converts the requested physical disks from - RAID to JBOD or vice versa. It does this by only converting the - disks that are not already in the correct state. - - :param node: an ironic node object. - :param mode: the mode to change the disks either to RAID or JBOD. - :param controllers_to_physical_disk_ids: Dictionary of controllers and - corresponding disk ids to convert to the requested mode. - :return: a dictionary containing: - - conversion_results, a dictionary that maps controller ids - to the conversion results for that controller. - The conversion results are a dict that contains: - - The is_commit_required key with the value always set to - True indicating that a config job must be created to - complete disk conversion. - - The is_reboot_required key with a RebootRequired - enumerated value indicating whether the server must be - rebooted to complete disk conversion. - :raises: DRACOperationError on an error from python-dracclient. - """ - try: - drac_job.validate_job_queue(node) - client = drac_common.get_drac_client(node) - return client.change_physical_disk_state( - mode, controllers_to_physical_disk_ids) - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to change physical drives ' - 'to %(mode)s mode for node %(node_uuid)s. ' - 'Reason: %(error)s.', - {'mode': mode, 'node_uuid': node.uuid, 'error': exc}) - raise exception.DracOperationError(error=exc) - - -def commit_config(node, raid_controller, reboot=False, realtime=False): - """Apply all pending changes on a RAID controller. 
- - :param node: an ironic node object. - :param raid_controller: id of the RAID controller. - :param reboot: indicates whether a reboot job should be automatically - created with the config job. (optional, defaults to False) - :param realtime: indicates RAID controller supports realtime. - (optional, defaults to False) - :returns: id of the created job - :raises: DracOperationError on an error from python-dracclient. - """ - client = drac_common.get_drac_client(node) - - try: - return client.commit_pending_raid_changes( - raid_controller=raid_controller, - reboot=reboot, - realtime=realtime) - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to commit pending RAID config for' - ' controller %(raid_controller_fqdd)s on node ' - '%(node_uuid)s. Reason: %(error)s.', - {'raid_controller_fqdd': raid_controller, - 'node_uuid': node.uuid, - 'error': exc}) - raise exception.DracOperationError(error=exc) - - -def _change_physical_disk_mode(node, mode=None, - controllers_to_physical_disk_ids=None, - substep="completed"): - """Physical drives conversion from RAID to JBOD or vice-versa. - - :param node: an ironic node object. - :param mode: the mode to change the disks either to RAID or JBOD. - :param controllers_to_physical_disk_ids: Dictionary of controllers and - corresponding disk ids to convert to the requested mode. - :returns: states.CLEANWAIT if deletion is in progress asynchronously - or None if it is completed. - """ - change_disk_state = change_physical_disk_state( - node, mode, controllers_to_physical_disk_ids) - - controllers = list() - conversion_results = change_disk_state['conversion_results'] - for controller_id, result in conversion_results.items(): - controller = {'raid_controller': controller_id, - 'is_reboot_required': result['is_reboot_required'], - 'is_commit_required': result['is_commit_required']} - controllers.append(controller) - - return _commit_to_controllers( - node, - controllers, substep=substep) - - -def abandon_config(node, raid_controller): - """Deletes all pending changes on a RAID controller. - - :param node: an ironic node object. - :param raid_controller: id of the RAID controller. - :raises: DracOperationError on an error from python-dracclient. - """ - client = drac_common.get_drac_client(node) - - try: - client.abandon_pending_raid_changes(raid_controller) - except drac_exceptions.BaseClientException as exc: - LOG.error('DRAC driver failed to delete pending RAID config ' - 'for controller %(raid_controller_fqdd)s on node ' - '%(node_uuid)s. Reason: %(error)s.', - {'raid_controller_fqdd': raid_controller, - 'node_uuid': node.uuid, - 'error': exc}) - raise exception.DracOperationError(error=exc) - - -def _calculate_spans(raid_level, disks_count): - """Calculates number of spans for a RAID level given a physical disk count - - :param raid_level: RAID level of the virtual disk. - :param disk_count: number of physical disks used for the virtual disk. - :returns: number of spans. - """ - if raid_level in ['0', '1', '5', '6']: - return 1 - elif raid_level in ['5+0', '6+0']: - return 2 - elif raid_level in ['1+0']: - return disks_count >> 1 - else: - reason = (_('Cannot calculate spans for RAID level "%s"') % - raid_level) - raise exception.DracOperationError(error=reason) - - -def _usable_disks_count(raid_level, disks_count): - """Calculates the number of disks usable for a RAID level - - ...given a physical disk count - - :param raid_level: RAID level of the virtual disk. 
- :param disk_count: number of physical disks used for the virtual disk. - :returns: number of disks. - """ - if raid_level in ['0', '1', '5', '6']: - return disks_count - elif raid_level in ['5+0', '6+0', '1+0']: - # largest even number less than disk_count - return (disks_count >> 1) << 1 - else: - reason = (_('RAID level %(raid_level)s is not supported by the ' - 'driver. Supported RAID levels: %(supported_raid_levels)s') - % {'raid_level': raid_level, - 'supported_raid_levels': list(RAID_LEVELS)}) - raise exception.DracOperationError(error=reason) - - -def _raid_level_min_disks(raid_level, spans_count=1): - try: - raid_level_info = RAID_LEVELS[raid_level] - except KeyError: - reason = (_('RAID level %(raid_level)s is not supported by the ' - 'driver. Supported RAID levels: %(supported_raid_levels)s') - % {'raid_level': raid_level, - 'supported_raid_levels': list(RAID_LEVELS)}) - raise exception.DracOperationError(error=reason) - - if raid_level_info['type'] == 'spanned': - if spans_count <= 1: - reason = _('Spanned RAID volumes cannot contain a single span') - raise exception.DracOperationError(error=reason) - - span_type = raid_level_info['span_type'] - raid_level_info = RAID_LEVELS[span_type] - - return raid_level_info['min_disks'] * spans_count - - -def _raid_level_max_disks(raid_level, spans_count=1): - try: - raid_level_info = RAID_LEVELS[raid_level] - except KeyError: - reason = (_('RAID level %(raid_level)s is not supported by the ' - 'driver. Supported RAID levels: %(supported_raid_levels)s') - % {'raid_level': raid_level, - 'supported_raid_levels': list(RAID_LEVELS)}) - raise exception.DracOperationError(error=reason) - - if raid_level_info['type'] == 'spanned': - if spans_count <= 1: - reason = _('Spanned RAID volumes cannot contain a single span') - raise exception.DracOperationError(error=reason) - - span_type = raid_level_info['span_type'] - raid_level_info = RAID_LEVELS[span_type] - - return raid_level_info['max_disks'] * spans_count - - -def _raid_level_overhead(raid_level, spans_count=1): - try: - raid_level_info = RAID_LEVELS[raid_level] - except KeyError: - reason = (_('RAID level %(raid_level)s is not supported by the ' - 'driver. 
Supported RAID levels: %(supported_raid_levels)s') - % {'raid_level': raid_level, - 'supported_raid_levels': list(RAID_LEVELS)}) - raise exception.DracOperationError(error=reason) - - if raid_level_info['type'] == 'spanned': - if spans_count <= 1: - reason = _('Spanned RAID volumes cannot contain a single span') - raise exception.DracOperationError(error=reason) - - span_type = raid_level_info['span_type'] - raid_level_info = RAID_LEVELS[span_type] - - return raid_level_info['overhead'] * spans_count - - -def _max_volume_size_mb(raid_level, physical_disks, free_space_mb, - spans_count=1, stripe_size_kb=64 * units.Ki): - # restrict the size to the smallest available space - free_spaces = [free_space_mb[disk] for disk in physical_disks] - size_kb = min(free_spaces) * units.Ki - - # NOTE(ifarkas): using math.floor so we get a volume size that does not - # exceed the available space - stripes_per_disk = int(math.floor(float(size_kb) / stripe_size_kb)) - - disks_count = len(physical_disks) - overhead_disks_count = _raid_level_overhead(raid_level, spans_count) - - return int(stripes_per_disk * stripe_size_kb - * (disks_count - overhead_disks_count) / units.Ki) - - -def _volume_usage_per_disk_mb(logical_disk, physical_disks, spans_count=1, - stripe_size_kb=64 * units.Ki): - disks_count = len(physical_disks) - overhead_disks_count = _raid_level_overhead(logical_disk['raid_level'], - spans_count) - volume_size_kb = logical_disk['size_mb'] * units.Ki - # NOTE(ifarkas): using math.ceil so we get the largest disk usage - # possible, so we can avoid over-committing - stripes_per_volume = math.ceil(float(volume_size_kb) / stripe_size_kb) - - stripes_per_disk = math.ceil( - float(stripes_per_volume) / (disks_count - overhead_disks_count)) - return int(stripes_per_disk * stripe_size_kb / units.Ki) - - -def _find_configuration(logical_disks, physical_disks, pending_delete): - """Find RAID configuration. - - This method transforms the RAID configuration defined in Ironic to a format - that is required by dracclient. This includes matching the physical disks - to RAID volumes when it's not pre-defined, or in general calculating - missing properties. - - :param logical_disks: list of logical disk definitions. - :param physical_disks: list of physical disk definitions. - :param pending_delete: Whether there is a pending deletion of virtual - disks that should be accounted for. - """ - - # shared physical disks of RAID volumes size_gb='MAX' should be - # deprioritized during the matching process to reserve as much space as - # possible. Reserved means it won't be used during matching. 
- volumes_with_reserved_physical_disks = [ - volume for volume in logical_disks - if ('physical_disks' in volume and volume['size_mb'] == 'MAX' - and volume.get('share_physical_disks', False))] - reserved_physical_disks = [ - disk for disk in physical_disks - for volume in volumes_with_reserved_physical_disks - if disk.id in volume['physical_disks']] - - # we require each logical disk contain only homogeneous physical disks, so - # sort them by type - physical_disks_by_type = {} - reserved_physical_disks_by_type = {} - free_space_mb = {} - for disk in physical_disks: - # calculate free disk space - free_space_mb[disk] = _get_disk_free_size_mb(disk, pending_delete) - - disk_type = (disk.controller, disk.media_type, disk.interface_type, - disk.size_mb) - if disk_type not in physical_disks_by_type: - physical_disks_by_type[disk_type] = [] - reserved_physical_disks_by_type[disk_type] = [] - - if disk in reserved_physical_disks: - reserved_physical_disks_by_type[disk_type].append(disk) - else: - physical_disks_by_type[disk_type].append(disk) - - # exclude non-shared physical disks (predefined by the user) from - # physical_disks_by_type because they are not going to be used during - # matching - for volume in logical_disks: - if ('physical_disks' in volume - and not volume.get('share_physical_disks', False)): - for disk in physical_disks: - if disk.id in volume['physical_disks']: - disk_type = (disk.controller, disk.media_type, - disk.interface_type, disk.size_mb) - if disk in physical_disks_by_type[disk_type]: - physical_disks_by_type[disk_type].remove(disk) - - processed_volumes = [] - - # step 1 - process volumes with predefined disks and exact size - for volume in [volume for volume in logical_disks - if ('physical_disks' in volume - and volume['size_mb'] != 'MAX')]: - _calculate_volume_props(volume, physical_disks, free_space_mb) - processed_volumes.append(volume) - - # step 2 - process volumes without predefined disks - volumes_without_disks = [disk for disk in logical_disks - if 'physical_disks' not in disk] - - if volumes_without_disks: - result, free_space_mb = ( - _assign_disks_to_volume(volumes_without_disks, - physical_disks_by_type, free_space_mb, - pending_delete)) - if not result: - # try again using the reserved physical disks in addition - for disk_type, disks in physical_disks_by_type.items(): - physical_disks_by_type[disk_type] += ( - reserved_physical_disks_by_type[disk_type]) - - result, free_space_mb = ( - _assign_disks_to_volume(volumes_without_disks, - physical_disks_by_type, - free_space_mb, - pending_delete)) - if not result: - error_msg = _('failed to find matching physical disks for all ' - 'logical disks') - LOG.error('DRAC driver failed to create RAID ' - 'configuration. 
Reason: %(error)s.', - {'error': error_msg}) - raise exception.DracOperationError(error=error_msg) - - processed_volumes += volumes_without_disks - - # step 3 - process volumes with predefined disks and size_mb == 'MAX' - for volume in [volume for volume in logical_disks - if ('physical_disks' in volume - and volume['size_mb'] == 'MAX')]: - _calculate_volume_props(volume, physical_disks, free_space_mb) - processed_volumes.append(volume) - - return processed_volumes - - -def _calculate_volume_props(logical_disk, physical_disks, free_space_mb): - selected_disks = [disk for disk in physical_disks - if disk.id in logical_disk['physical_disks']] - - spans_count = _calculate_spans( - logical_disk['raid_level'], len(selected_disks)) - - if len(selected_disks) % spans_count != 0: - error_msg = _('invalid number of physical disks was provided') - raise exception.DracOperationError(error=error_msg) - - disks_per_span = int(len(selected_disks) / spans_count) - - # Best practice is to not pass span_length and span_depth when creating a - # RAID10. The iDRAC will dynamically calculate these values using maximum - # values obtained from the RAID controller. - logical_disk['span_depth'] = None - logical_disk['span_length'] = None - if logical_disk['raid_level'] != '1+0': - logical_disk['span_depth'] = spans_count - logical_disk['span_length'] = disks_per_span - - max_volume_size_mb = _max_volume_size_mb( - logical_disk['raid_level'], selected_disks, free_space_mb, - spans_count=spans_count) - - if logical_disk['size_mb'] == 'MAX': - if max_volume_size_mb == 0: - error_msg = _("size set to 'MAX' but could not allocate physical " - "disk space") - raise exception.DracOperationError(error=error_msg) - - logical_disk['size_mb'] = max_volume_size_mb - elif max_volume_size_mb < logical_disk['size_mb']: - if max_volume_size_mb == 0: - error_msg = _('not enough physical disk space for the logical ' - 'disk') - raise exception.DracOperationError(error=error_msg) - - disk_usage = _volume_usage_per_disk_mb(logical_disk, selected_disks, - spans_count=spans_count) - - for disk in selected_disks: - if free_space_mb[disk] < disk_usage: - error_msg = _('not enough free space on physical disks for the ' - 'logical disk') - raise exception.DracOperationError(error=error_msg) - else: - free_space_mb[disk] -= disk_usage - - if 'controller' not in logical_disk: - logical_disk['controller'] = selected_disks[0].controller - - -def _assign_disks_to_volume(logical_disks, physical_disks_by_type, - free_space_mb, pending_delete): - logical_disk = logical_disks.pop(0) - raid_level = logical_disk['raid_level'] - - # iterate over all possible configurations - for (controller, disk_type, - interface_type, size_mb), disks in physical_disks_by_type.items(): - - if ('disk_type' in logical_disk - and logical_disk['disk_type'] != disk_type): - continue - - if ('interface_type' in logical_disk - and logical_disk['interface_type'] != interface_type): - continue - - # filter out disks without free disk space - disks = [disk for disk in disks if free_space_mb[disk] > 0] - - # sort disks by free size which is important if we have max disks limit - # on a volume - disks = sorted( - disks, - key=lambda disk: free_space_mb[disk]) - - # filter out disks already in use if sharing is disabled - if ('share_physical_disks' not in logical_disk - or not logical_disk['share_physical_disks']): - initial_free_size_mb = { - disk: _get_disk_free_size_mb(disk, pending_delete) - for disk in disks - } - disks = [disk for disk in disks - if 
initial_free_size_mb[disk] == free_space_mb[disk]] - - max_spans = _calculate_spans(raid_level, len(disks)) - min_spans = min([2, max_spans]) - min_disks = _raid_level_min_disks(raid_level, - spans_count=min_spans) - max_disks = _raid_level_max_disks(raid_level, - spans_count=max_spans) - candidate_max_disks = min([max_disks, len(disks)]) - - for disks_count in range(min_disks, candidate_max_disks + 1): - if ('number_of_physical_disks' in logical_disk - and (logical_disk['number_of_physical_disks'] - != disks_count)): - continue - - # skip invalid disks_count - if disks_count != _usable_disks_count(logical_disk['raid_level'], - disks_count): - continue - - selected_disks = disks[0:disks_count] - - candidate_volume = logical_disk.copy() - candidate_free_space_mb = free_space_mb.copy() - candidate_volume['physical_disks'] = [disk.id for disk - in selected_disks] - try: - _calculate_volume_props(candidate_volume, selected_disks, - candidate_free_space_mb) - except exception.DracOperationError: - continue - - if len(logical_disks) > 0: - result, candidate_free_space_mb = ( - _assign_disks_to_volume(logical_disks, - physical_disks_by_type, - candidate_free_space_mb, - pending_delete)) - if result: - logical_disks.append(candidate_volume) - return (True, candidate_free_space_mb) - else: - logical_disks.append(candidate_volume) - return (True, candidate_free_space_mb) - else: - # put back the logical_disk to queue - logical_disks.insert(0, logical_disk) - return (False, free_space_mb) - - -def _filter_logical_disks(logical_disks, include_root_volume, - include_nonroot_volumes): - filtered_disks = [] - for disk in logical_disks: - if include_root_volume and disk.get('is_root_volume'): - filtered_disks.append(disk) - - if include_nonroot_volumes and not disk.get('is_root_volume'): - filtered_disks.append(disk) - - return filtered_disks - - -def _create_config_job(node, controller, reboot=False, realtime=False, - raid_config_job_ids=[], - raid_config_parameters=[]): - job_id = commit_config(node, raid_controller=controller, - reboot=reboot, realtime=realtime) - - raid_config_job_ids.append(job_id) - if controller not in raid_config_parameters: - raid_config_parameters.append(controller) - - LOG.info('Change has been committed to RAID controller ' - '%(controller)s on node %(node)s. 
' - 'DRAC job id: %(job_id)s', - {'controller': controller, 'node': node.uuid, - 'job_id': job_id}) - return {'raid_config_job_ids': raid_config_job_ids, - 'raid_config_parameters': raid_config_parameters} - - -def _validate_volume_size(node, logical_disks): - new_physical_disks = list_physical_disks(node) - free_space_mb = {} - new_processed_volumes = [] - for disk in new_physical_disks: - free_space_mb[disk] = disk.free_size_mb - - for logical_disk in logical_disks: - selected_disks = [disk for disk in new_physical_disks - if disk.id in logical_disk['physical_disks']] - - spans_count = _calculate_spans( - logical_disk['raid_level'], len(selected_disks)) - - new_max_vol_size_mb = _max_volume_size_mb( - logical_disk['raid_level'], - selected_disks, - free_space_mb, - spans_count=spans_count) - - if logical_disk['size_mb'] > new_max_vol_size_mb: - logical_disk['size_mb'] = new_max_vol_size_mb - LOG.info("Logical size does not match so calculating volume " - "properties for current logical_disk") - _calculate_volume_props( - logical_disk, new_physical_disks, free_space_mb) - new_processed_volumes.append(logical_disk) - - if new_processed_volumes: - return new_processed_volumes - - return logical_disks - - -def _switch_to_raid_mode(node, controller_fqdd): - """Convert the controller mode from Enhanced HBA to RAID mode - - :param node: an ironic node object - :param controller_fqdd: the ID of the RAID controller. - :returns: a dictionary containing - - The raid_controller key with a ID of the - RAID controller value. - - The is_commit_required needed key with a - boolean value indicating whether a config job must be created - for the values to be applied. - - The is_reboot_required key with a RebootRequired enumerated - value indicating whether the server must be rebooted to - switch the controller mode to RAID. - """ - # wait for pending jobs to complete - drac_job.wait_for_job_completion(node) - - raid_attr = "{}:{}".format(controller_fqdd, - _REQUESTED_RAID_CONTROLLER_MODE) - settings = {raid_attr: _RAID_MODE} - settings_results = set_raid_settings( - node, controller_fqdd, settings) - controller = { - 'raid_controller': controller_fqdd, - 'is_reboot_required': settings_results['is_reboot_required'], - 'is_commit_required': settings_results['is_commit_required']} - return controller - - -def _commit_to_controllers(node, controllers, substep="completed"): - """Commit changes to RAID controllers on the node. - - :param node: an ironic node object - :param controllers: a list of dictionary containing - - The raid_controller key with raid controller - fqdd value indicating on which raid configuration - job needs to be perform. - - The is_commit_required needed key with a - boolean value indicating whether a config job must - be created. - - The is_reboot_required key with a RebootRequired - enumerated value indicating whether the server must - be rebooted only if raid controller does not support - realtime. - :param substep: contain sub cleaning or deploy step which executes any raid - configuration job if set after cleaning or deploy step. - (default to completed) - :returns: states.CLEANWAIT (cleaning) or states.DEPLOYWAIT (deployment) if - configuration is in progress asynchronously or None if it is - completed. 
- """ - # remove controller which does not require configuration job - controllers = [controller for controller in controllers - if controller['is_commit_required']] - - if not controllers: - LOG.debug('No changes on any of the controllers on node %s', - node.uuid) - node.set_driver_internal_info('raid_config_substep', substep) - node.set_driver_internal_info('raid_config_parameters', []) - node.save() - return - - i_raid_config_parameters = [] - i_raid_config_job_ids = node.driver_internal_info.get( - 'raid_config_job_ids', []) - - optional = drac_constants.RebootRequired.optional - - # all realtime controllers - all_realtime = all( - (cntlr['is_reboot_required'] == optional) - and not (cntlr.get('is_ehba_mode')) - for cntlr in controllers) - - # check any controller with ehba mode - any_ehba_controllers = any( - cntrl.get('is_ehba_mode') is True for cntrl in controllers) - - raid_config_job_ids = [] - raid_config_parameters = [] - if all_realtime: - for controller in controllers: - realtime_controller = controller['raid_controller'] - job_details = _create_config_job( - node, controller=realtime_controller, - reboot=False, realtime=True, - raid_config_job_ids=raid_config_job_ids, - raid_config_parameters=raid_config_parameters) - - elif any_ehba_controllers: - commit_to_ehba_controllers = [] - for controller in controllers: - if controller.get('is_ehba_mode'): - job_details = _create_config_job( - node, controller=controller['raid_controller'], - reboot=False, realtime=True, - raid_config_job_ids=raid_config_job_ids, - raid_config_parameters=raid_config_parameters) - - ehba_controller = _switch_to_raid_mode( - node, controller['raid_controller']) - commit_to_ehba_controllers.append( - ehba_controller['raid_controller']) - else: - job_details = _create_config_job( - node, controller=controller['raid_controller'], - reboot=False, realtime=False, - raid_config_job_ids=raid_config_job_ids, - raid_config_parameters=raid_config_parameters) - - for controller in commit_to_ehba_controllers: - LOG.debug("Create job with Reboot to apply configuration " - "changes for ehba controllers") - job_details = _create_config_job( - node, controller=controller, - reboot=(controller == commit_to_ehba_controllers[-1]), - realtime=False, raid_config_job_ids=raid_config_job_ids, - raid_config_parameters=raid_config_parameters) - else: - for controller in controllers: - mix_controller = controller['raid_controller'] - reboot = (controller == controllers[-1]) - job_details = _create_config_job( - node, controller=mix_controller, - reboot=reboot, realtime=False, - raid_config_job_ids=raid_config_job_ids, - raid_config_parameters=raid_config_parameters) - - i_raid_config_job_ids.extend(job_details['raid_config_job_ids']) - i_raid_config_parameters.extend(job_details['raid_config_parameters']) - node.set_driver_internal_info('raid_config_substep', substep) - node.set_driver_internal_info('raid_config_parameters', - i_raid_config_parameters) - node.set_driver_internal_info('raid_config_job_ids', i_raid_config_job_ids) - - # Signal whether the node has been rebooted, that we do not need to execute - # the step again, and that this completion of this step is triggered - # through async polling. - # NOTE(mgoddard): set_async_step_flags calls node.save(). 
- deploy_utils.set_async_step_flags( - node, - reboot=not all_realtime, - skip_current_step=True, - polling=True) - - return deploy_utils.get_async_step_return_state(node) - - -def _create_virtual_disks(task, node): - logical_disks_to_create = node.driver_internal_info[ - 'logical_disks_to_create'] - - # Check valid properties attached to voiume after drives conversion - isVolValidationNeeded = node.driver_internal_info[ - 'volume_validation'] - if isVolValidationNeeded: - logical_disks_to_create = _validate_volume_size( - node, logical_disks_to_create) - - controllers = list() - for logical_disk in logical_disks_to_create: - controller = dict() - controller_cap = create_virtual_disk( - node, - raid_controller=logical_disk['controller'], - physical_disks=logical_disk['physical_disks'], - raid_level=logical_disk['raid_level'], - size_mb=logical_disk['size_mb'], - disk_name=logical_disk.get('name'), - span_length=logical_disk.get('span_length'), - span_depth=logical_disk.get('span_depth')) - controller['raid_controller'] = logical_disk['controller'] - controller['is_reboot_required'] = controller_cap[ - 'is_reboot_required'] - controller['is_commit_required'] = controller_cap[ - 'is_commit_required'] - if controller not in controllers: - controllers.append(controller) - - return _commit_to_controllers(node, controllers) - - -def _controller_in_hba_mode(raid_settings, controller_fqdd): - controller_mode = raid_settings.get( - '{}:{}'.format(controller_fqdd, _CURRENT_RAID_CONTROLLER_MODE)) - - return _EHBA_MODE in controller_mode.current_value - - -def _controller_supports_ehba_mode(settings, controller_fqdd): - raid_cntrl_attr = "{}:{}".format(controller_fqdd, - _CURRENT_RAID_CONTROLLER_MODE) - current_cntrl_mode = settings.get(raid_cntrl_attr) - if not current_cntrl_mode: - return False - else: - return _EHBA_MODE in current_cntrl_mode.possible_values - - -def _get_disk_free_size_mb(disk, pending_delete): - """Return the size of free space on the disk in MB. - - :param disk: a PhysicalDisk object. - :param pending_delete: Whether there is a pending deletion of all virtual - disks. - """ - return disk.size_mb if pending_delete else disk.free_size_mb - def _wait_till_realtime_ready(task): """Waits till real time operations are ready to be executed. @@ -1641,378 +521,3 @@ class DracRedfishRAID(redfish_raid.RedfishRAID): manager_utils.notify_conductor_resume_clean(task) else: manager_utils.notify_conductor_resume_deploy(task) - - -class DracWSManRAID(base.RAIDInterface): - - # NOTE(TheJulia): Deprecating November 2023 in favor of Redfish - # and due to a lack of active driver maintenance. - supported = False - - def get_properties(self): - """Return the properties of the interface.""" - return drac_common.COMMON_PROPERTIES - - @base.deploy_step(priority=0, - argsinfo=base.RAID_APPLY_CONFIGURATION_ARGSINFO) - def apply_configuration(self, task, raid_config, create_root_volume=True, - create_nonroot_volumes=False, - delete_existing=True): - return super(DracWSManRAID, self).apply_configuration( - task, raid_config, create_root_volume=create_root_volume, - create_nonroot_volumes=create_nonroot_volumes, - delete_existing=delete_existing) - - @METRICS.timer('DracRAID.create_configuration') - @base.clean_step(priority=0, abortable=False, argsinfo={ - 'create_root_volume': { - 'description': ( - 'This specifies whether to create the root volume. ' - 'Defaults to `True`.' 
- ), - 'required': False - }, - 'create_nonroot_volumes': { - 'description': ( - 'This specifies whether to create the non-root volumes. ' - 'Defaults to `True`.' - ), - 'required': False - }, - "delete_existing": { - "description": ( - "Setting this to 'True' indicates to delete existing RAID " - "configuration prior to creating the new configuration. " - "Default value is 'False'." - ), - "required": False, - } - }, requires_ramdisk=False) - def create_configuration(self, task, - create_root_volume=True, - create_nonroot_volumes=True, - delete_existing=False): - """Create the RAID configuration. - - This method creates the RAID configuration on the given node. - - :param task: a TaskManager instance containing the node to act on. - :param create_root_volume: If True, a root volume is created - during RAID configuration. Otherwise, no root volume is - created. Default is True. - :param create_nonroot_volumes: If True, non-root volumes are - created. If False, no non-root volumes are created. Default - is True. - :param delete_existing: Setting this to True indicates to delete RAID - configuration prior to creating the new configuration. Default is - False. - :returns: states.CLEANWAIT (cleaning) or states.DEPLOYWAIT (deployment) - if creation is in progress asynchronously or None if it is - completed. - :raises: MissingParameterValue, if node.target_raid_config is missing - or empty. - :raises: DracOperationError on an error from python-dracclient. - """ - node = task.node - - logical_disks = node.target_raid_config['logical_disks'] - - for disk in logical_disks: - if disk['size_gb'] == 'MAX' and 'physical_disks' not in disk: - raise exception.InvalidParameterValue( - _("create_configuration called with invalid " - "target_raid_configuration for node %(node_id)s. " - "'physical_disks' is missing from logical_disk while " - "'size_gb'='MAX' was requested: " - "%(logical_disk)s") % {'node_id': node.uuid, - 'logical_disk': disk}) - - if disk['size_gb'] == 'MAX': - disk['size_mb'] = 'MAX' - else: - disk['size_mb'] = disk['size_gb'] * units.Ki - - del disk['size_gb'] - - if delete_existing: - self._delete_configuration_no_commit(task) - - physical_disks = list_physical_disks(node) - logical_disks = _find_configuration(logical_disks, physical_disks, - pending_delete=delete_existing) - - logical_disks_to_create = _filter_logical_disks( - logical_disks, create_root_volume, create_nonroot_volumes) - - controllers_to_physical_disk_ids = defaultdict(list) - for logical_disk in logical_disks_to_create: - # Not applicable to JBOD logical disks. 
- if logical_disk['raid_level'] == 'JBOD': - continue - - for physical_disk_name in logical_disk['physical_disks']: - controllers_to_physical_disk_ids[ - logical_disk['controller']].append( - physical_disk_name) - - # adding logical_disks to driver_internal_info to create virtual disks - node.set_driver_internal_info('logical_disks_to_create', - logical_disks_to_create) - - commit_results = None - if logical_disks_to_create: - LOG.debug( - "Converting physical disks configured to back RAID " - "logical disks to RAID mode for node %(node_uuid)s ", - {"node_uuid": node.uuid}) - raid_mode = drac_constants.RaidStatus.raid - commit_results = _change_physical_disk_mode( - node, raid_mode, - controllers_to_physical_disk_ids, - substep="create_virtual_disks") - - volume_validation = True if commit_results else False - node.set_driver_internal_info('volume_validation', - volume_validation) - node.save() - - if commit_results: - return commit_results - else: - LOG.debug("Controller does not support drives conversion " - "so creating virtual disks") - return _create_virtual_disks(task, node) - - @METRICS.timer('DracRAID.delete_configuration') - @base.clean_step(priority=0, requires_ramdisk=False) - @base.deploy_step(priority=0) - def delete_configuration(self, task): - """Delete the RAID configuration. - - :param task: a TaskManager instance containing the node to act on. - :returns: states.CLEANWAIT (cleaning) or states.DEPLOYWAIT (deployment) - if deletion is in progress asynchronously or None if it is - completed. - :raises: DracOperationError on an error from python-dracclient. - """ - - controllers = self._delete_configuration_no_commit(task) - return _commit_to_controllers(task.node, controllers, - substep="delete_foreign_config") - - @METRICS.timer('DracRAID.get_logical_disks') - def get_logical_disks(self, task): - """Get the RAID configuration of the node. - - :param task: a TaskManager instance containing the node to act on. - :returns: A dictionary of properties. - :raises: DracOperationError on an error from python-dracclient. 
- """ - node = task.node - - logical_disks = [] - for disk in list_virtual_disks(node): - logical_disk = { - 'id': disk.id, - 'controller': disk.controller, - 'size_gb': int(disk.size_mb / units.Ki), - 'raid_level': disk.raid_level - } - - if disk.name is not None: - logical_disk['name'] = disk.name - - logical_disks.append(logical_disk) - - return {'logical_disks': logical_disks} - - @METRICS.timer('DracRAID._query_raid_config_job_status') - @periodics.node_periodic( - purpose='checking async raid configuration jobs', - spacing=CONF.drac.query_raid_config_job_status_interval, - filters={'reserved': False, 'maintenance': False}, - predicate_extra_fields=['driver_internal_info'], - predicate=lambda n: ( - n.driver_internal_info.get('raid_config_job_ids') - ), - ) - def _query_raid_config_job_status(self, task, manager, context): - """Periodic task to check the progress of running RAID config jobs.""" - self._check_node_raid_jobs(task) - - @METRICS.timer('DracRAID._check_node_raid_jobs') - def _check_node_raid_jobs(self, task): - """Check the progress of running RAID config jobs of a node.""" - - node = task.node - raid_config_job_ids = node.driver_internal_info['raid_config_job_ids'] - finished_job_ids = [] - - for config_job_id in raid_config_job_ids: - config_job = drac_job.get_job(node, job_id=config_job_id) - - if config_job is None or config_job.status == 'Completed': - finished_job_ids.append(config_job_id) - elif (config_job.status == 'Failed' - or config_job.status == 'Completed with Errors'): - finished_job_ids.append(config_job_id) - self._set_raid_config_job_failure(node) - - if not finished_job_ids: - return - - task.upgrade_lock() - self._delete_cached_config_job_id(node, finished_job_ids) - - if not node.driver_internal_info.get('raid_config_job_failure', - False): - if 'raid_config_substep' in node.driver_internal_info: - substep = node.driver_internal_info['raid_config_substep'] - - if substep == 'delete_foreign_config': - foreign_drives = self._execute_foreign_drives(task, node) - if foreign_drives is None: - return self._convert_drives(task, node) - elif substep == 'physical_disk_conversion': - self._convert_drives(task, node) - elif substep == "create_virtual_disks": - return _create_virtual_disks(task, node) - elif substep == 'completed': - self._complete_raid_substep(task, node) - else: - self._complete_raid_substep(task, node) - else: - self._clear_raid_substep(node) - self._clear_raid_config_job_failure(node) - self._set_failed(task, config_job) - - def _execute_foreign_drives(self, task, node): - controllers = list() - jobs_required = False - for controller_id in node.driver_internal_info[ - 'raid_config_parameters']: - controller_cap = clear_foreign_config( - node, controller_id) - controller = { - 'raid_controller': controller_id, - 'is_reboot_required': controller_cap['is_reboot_required'], - 'is_commit_required': controller_cap['is_commit_required']} - controllers.append(controller) - jobs_required = jobs_required or controller_cap[ - 'is_commit_required'] - - if not jobs_required: - LOG.info( - "No foreign drives detected, so " - "resume %s", "cleaning" if node.clean_step else "deployment") - return None - else: - return _commit_to_controllers( - node, - controllers, - substep='physical_disk_conversion') - - def _complete_raid_substep(self, task, node): - self._clear_raid_substep(node) - self._resume(task) - - def _convert_drives(self, task, node): - jbod = drac_constants.RaidStatus.jbod - drives_results = _change_physical_disk_mode( - node, mode=jbod) - if 
drives_results is None: - LOG.debug("Controller does not support drives " - "conversion on %(node_uuid)s", - {'node_uuid': node.uuid}) - self._complete_raid_substep(task, node) - - def _clear_raid_substep(self, node): - node.del_driver_internal_info('raid_config_substep') - node.del_driver_internal_info('raid_config_parameters') - node.save() - - def _set_raid_config_job_failure(self, node): - node.set_driver_internal_info('raid_config_job_failure', True) - node.save() - - def _clear_raid_config_job_failure(self, node): - node.del_driver_internal_info('raid_config_job_failure') - node.save() - - def _delete_cached_config_job_id(self, node, finished_config_job_ids=None): - if finished_config_job_ids is None: - finished_config_job_ids = [] - unfinished_job_ids = [ - job_id for job_id - in node.driver_internal_info['raid_config_job_ids'] - if job_id not in finished_config_job_ids] - node.set_driver_internal_info('raid_config_job_ids', - unfinished_job_ids) - node.save() - - def _set_failed(self, task, config_job): - error_msg = (_("Failed config job: %(config_job_id)s. " - "Message: '%(message)s'.") % - {'config_job_id': config_job.id, - 'message': config_job.message}) - log_msg = ("RAID configuration job failed for node %(node)s. " - "%(error)s" % - {'node': task.node.uuid, 'error': error_msg}) - if task.node.clean_step: - manager_utils.cleaning_error_handler(task, error_msg) - else: - manager_utils.deploying_error_handler(task, log_msg, error_msg) - - def _resume(self, task): - raid_common.update_raid_info( - task.node, self.get_logical_disks(task)) - if task.node.clean_step: - manager_utils.notify_conductor_resume_clean(task) - else: - manager_utils.notify_conductor_resume_deploy(task) - - def _delete_configuration_no_commit(self, task): - """Delete existing RAID configuration without committing the change. - - :param task: A TaskManager instance. - :returns: A set of names of RAID controllers which need RAID changes to - be committed. - """ - node = task.node - controllers = list() - drac_raid_controllers = list_raid_controllers(node) - drac_raid_settings = list_raid_settings(node) - for cntrl in drac_raid_controllers: - if _is_raid_controller(node, cntrl.id, drac_raid_controllers): - controller = dict() - if _controller_supports_ehba_mode( - drac_raid_settings, - cntrl.id) and _controller_in_hba_mode( - drac_raid_settings, cntrl.id): - controller['is_ehba_mode'] = True - controller_cap = _reset_raid_config(node, cntrl.id) - controller["raid_controller"] = cntrl.id - controller["is_reboot_required"] = controller_cap[ - "is_reboot_required"] - controller["is_commit_required"] = controller_cap[ - "is_commit_required"] - controllers.append(controller) - return controllers - - -class DracRAID(DracWSManRAID): - """Class alias of class DracWSManRAID. - - This class provides ongoing support of the deprecated 'idrac' RAID - interface implementation entrypoint. - - All bug fixes and new features should be implemented in its base - class, DracWSManRAID. That makes them available to both the - deprecated 'idrac' and new 'idrac-wsman' entrypoints. Such changes - should not be made to this class. - """ - - def __init__(self): - super(DracRAID, self).__init__() - LOG.warning("RAID interface 'idrac' is deprecated and may be removed " - "in a future release. 
Use 'idrac-wsman' instead.") diff --git a/ironic/drivers/modules/drac/vendor_passthru.py b/ironic/drivers/modules/drac/vendor_passthru.py index d35ddf0791..ca85b7c451 100644 --- a/ironic/drivers/modules/drac/vendor_passthru.py +++ b/ironic/drivers/modules/drac/vendor_passthru.py @@ -15,187 +15,8 @@ DRAC vendor-passthru interface """ -from ironic_lib import metrics_utils -from oslo_log import log as logging - -from ironic.common.i18n import _ -from ironic.conductor import task_manager -from ironic.drivers import base -from ironic.drivers.modules.drac import bios as drac_bios -from ironic.drivers.modules.drac import common as drac_common -from ironic.drivers.modules.drac import job as drac_job from ironic.drivers.modules.redfish import vendor as redfish_vendor -LOG = logging.getLogger(__name__) - -METRICS = metrics_utils.get_metrics_logger(__name__) - - -class DracWSManVendorPassthru(base.VendorInterface): - """Interface for DRAC specific methods.""" - - # NOTE(TheJulia): Deprecating November 2023 in favor of Redfish - # and due to a lack of active driver maintenance. - supported = False - - def get_properties(self): - """Return the properties of the interface.""" - return drac_common.COMMON_PROPERTIES - - @METRICS.timer('DracVendorPassthru.validate') - def validate(self, task, **kwargs): - """Validate the driver-specific info supplied. - - This method validates whether the 'driver_info' property of the - supplied node contains the required information for this driver to - manage the power state of the node. - - :param task: a TaskManager instance containing the node to act on. - :param kwargs: not used. - :raises: InvalidParameterValue if required driver_info attribute - is missing or invalid on the node. - """ - return drac_common.parse_driver_info(task.node) - - @METRICS.timer('DracVendorPassthru.get_bios_config') - @base.passthru(['GET'], async_call=False, - description=_("Returns a dictionary containing the BIOS " - "settings from a node.")) - def get_bios_config(self, task, **kwargs): - """Get the BIOS configuration. - - This method is used to retrieve the BIOS settings from a node. - - :param task: a TaskManager instance containing the node to act on. - :param kwargs: not used. - :raises: DracOperationError on an error from python-dracclient. - :returns: a dictionary containing BIOS settings. - """ - bios_attrs = {} - for name, bios_attr in drac_bios.get_config(task.node).items(): - bios_attrs[name] = bios_attr.__dict__ - - return bios_attrs - - @METRICS.timer('DracVendorPassthru.set_bios_config') - @base.passthru(['POST'], async_call=False, - description=_("Change the BIOS configuration on a node. " - "Required argument : a dictionary of " - "{'AttributeName': 'NewValue'}. Returns " - "a dictionary containing the " - "'is_commit_required' key with a Boolean " - "value indicating whether " - "commit_bios_config() needs to be called " - "to make the changes, and the " - "'is_reboot_required' key with a value of " - "'true' or 'false'. This key is used to " - "indicate to the commit_bios_config() call " - "if a reboot should be performed.")) - @task_manager.require_exclusive_lock - def set_bios_config(self, task, **kwargs): - """Change BIOS settings. - - This method is used to change the BIOS settings on a node. - - :param task: a TaskManager instance containing the node to act on. - :param kwargs: a dictionary of {'AttributeName': 'NewValue'} - :raises: DracOperationError on an error from python-dracclient. 
- :returns: A dictionary containing the ``is_commit_required`` key with a - Boolean value indicating whether commit_bios_config() needs - to be called to make the changes, and the - ``is_reboot_required`` key with a value of 'true' or 'false'. - This key is used to indicate to the commit_bios_config() call - if a reboot should be performed. - """ - return drac_bios.set_config(task, **kwargs) - - @METRICS.timer('DracVendorPassthru.commit_bios_config') - @base.passthru(['POST'], async_call=False, - description=_("Commit a BIOS configuration job submitted " - "through set_bios_config(). Required " - "argument: 'reboot' - indicates whether a " - "reboot job should be automatically created " - "with the config job. Returns a dictionary " - "containing the 'job_id' key with the ID of " - "the newly created config job, and the " - "'reboot_required' key indicating whether " - "the node needs to be rebooted to start the " - "config job.")) - @task_manager.require_exclusive_lock - def commit_bios_config(self, task, reboot=False, **kwargs): - """Commit a BIOS configuration job. - - This method is used to commit a BIOS configuration job. - submitted through set_bios_config(). - - :param task: a TaskManager instance containing the node to act on. - :param reboot: indicates whether a reboot job should be automatically - created with the config job. - :param kwargs: not used. - :raises: DracOperationError on an error from python-dracclient. - :returns: A dictionary containing the ``job_id`` key with the id of the - newly created config job, and the ``reboot_required`` key - indicating whether the node needs to be rebooted to start the - config job. - """ - job_id = drac_bios.commit_config(task, reboot=reboot) - return {'job_id': job_id, 'reboot_required': not reboot} - - @METRICS.timer('DracVendorPassthru.abandon_bios_config') - @base.passthru(['DELETE'], async_call=False, - description=_("Abandon a BIOS configuration job previously " - "submitted through set_bios_config().")) - @task_manager.require_exclusive_lock - def abandon_bios_config(self, task, **kwargs): - """Abandon a BIOS configuration job. - - This method is used to abandon a BIOS configuration previously - submitted through set_bios_config(). - - :param task: a TaskManager instance containing the node to act on. - :param kwargs: not used. - :raises: DracOperationError on an error from python-dracclient. - """ - drac_bios.abandon_config(task) - - @base.passthru(['GET'], async_call=False, - description=_('Returns a dictionary containing the key ' - '"unfinished_jobs"; its value is a list of ' - 'dictionaries. Each dictionary represents ' - 'an unfinished config Job object.')) - def list_unfinished_jobs(self, task, **kwargs): - """List unfinished config jobs of the node. - - :param task: a TaskManager instance containing the node to act on. - :param kwargs: not used. - :returns: a dictionary containing the ``unfinished_jobs`` key; this key - points to a list of dicts, with each dict representing a Job - object. - :raises: DracOperationError on an error from python-dracclient. - """ - jobs = drac_job.list_unfinished_jobs(task.node) - # FIXME(mgould) Do this without calling private methods. - return {'unfinished_jobs': [job._asdict() for job in jobs]} - - -class DracVendorPassthru(DracWSManVendorPassthru): - """Class alias of class DracWSManVendorPassthru. - - This class provides ongoing support of the deprecated 'idrac' vendor - passthru interface implementation entrypoint. 
- - All bug fixes and new features should be implemented in its base - class, DracWSManVendorPassthru. That makes them available to both - the deprecated 'idrac' and new 'idrac-wsman' entrypoints. Such - changes should not be made to this class. - """ - - def __init__(self): - super(DracVendorPassthru, self).__init__() - LOG.warning("Vendor passthru interface 'idrac' is deprecated and may " - "be removed in a future release. Use 'idrac-wsman' " - "instead.") - class DracRedfishVendorPassthru(redfish_vendor.RedfishVendorPassthru): """iDRAC Redfish interface for vendor_passthru. diff --git a/ironic/tests/unit/db/utils.py b/ironic/tests/unit/db/utils.py index 33e9601609..910288a591 100644 --- a/ironic/tests/unit/db/utils.py +++ b/ironic/tests/unit/db/utils.py @@ -89,12 +89,6 @@ def get_test_ilo_info(): def get_test_drac_info(): return { - "drac_address": "1.2.3.4", - "drac_port": 443, - "drac_path": "/wsman", - "drac_protocol": "https", - "drac_username": "admin", - "drac_password": "fake", "redfish_address": "1.2.3.4", "redfish_system_id": "/redfish/v1/Systems/System.Embedded.1", "redfish_username": "admin", diff --git a/ironic/tests/unit/drivers/modules/drac/test_bios.py b/ironic/tests/unit/drivers/modules/drac/test_bios.py deleted file mode 100644 index ab56fed0eb..0000000000 --- a/ironic/tests/unit/drivers/modules/drac/test_bios.py +++ /dev/null @@ -1,647 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (c) 2015-2021 Dell Inc. or its subsidiaries. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -""" -Test class for DRAC BIOS configuration specific methods -""" - -from unittest import mock - -from dracclient import exceptions as drac_exceptions -from oslo_utils import importutils -from oslo_utils import timeutils - -from ironic.common import exception -from ironic.common import states -from ironic.conductor import task_manager -from ironic.conductor import utils as manager_utils -from ironic.drivers.modules import deploy_utils -from ironic.drivers.modules.drac import bios as drac_bios -from ironic.drivers.modules.drac import common as drac_common -from ironic.drivers.modules.drac import job as drac_job -from ironic import objects -from ironic.tests.unit.drivers.modules.drac import utils as test_utils -from ironic.tests.unit.objects import utils as obj_utils - -drac_constants = importutils.try_import('dracclient.constants') - -INFO_DICT = test_utils.INFO_DICT - - -class DracWSManBIOSConfigurationTestCase(test_utils.BaseDracTest): - def setUp(self): - super(DracWSManBIOSConfigurationTestCase, self).setUp() - self.node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - self.bios = drac_bios.DracWSManBIOS() - patch_get_drac_client = mock.patch.object( - drac_common, 'get_drac_client', spec_set=True, autospec=True) - mock_get_drac_client = patch_get_drac_client.start() - self.mock_client = mock_get_drac_client.return_value - self.addCleanup(patch_get_drac_client.stop) - - proc_virt_attr = { - 'current_value': 'Enabled', - 'pending_value': None, - 'read_only': False, - 'possible_values': ['Enabled', 'Disabled']} - mock_proc_virt_attr = mock.NonCallableMock(spec=[], **proc_virt_attr) - mock_proc_virt_attr.name = 'ProcVirtualization' - self.bios_attrs = {'ProcVirtualization': mock_proc_virt_attr} - - self.mock_client.set_lifecycle_settings.return_value = { - "is_commit_required": True - } - self.mock_client.commit_pending_lifecycle_changes.return_value = \ - "JID_1234" - - self.mock_client.set_bios_settings.return_value = { - "is_commit_required": True, - "is_reboot_required": True - } - self.mock_client.commit_pending_bios_changes.return_value = \ - "JID_5678" - self.mock_client.get_power_state.return_value = drac_constants.POWER_ON - - @mock.patch.object(drac_common, 'parse_driver_info', - autospec=True) - def test_validate(self, mock_parse_driver_info): - with task_manager.acquire(self.context, self.node.uuid) as task: - task.driver.bios.validate(task) - mock_parse_driver_info.assert_called_once_with(task.node) - - def test_get_properties(self): - with task_manager.acquire(self.context, self.node.uuid) as task: - test_properties = task.driver.bios.get_properties() - for each_property in drac_common.COMMON_PROPERTIES: - self.assertIn(each_property, test_properties) - - @mock.patch.object(objects, 'BIOSSettingList', autospec=True) - def test_cache_bios_settings_noop(self, mock_BIOSSettingList): - create_list = [] - update_list = [] - delete_list = [] - nochange_list = [{'name': 'ProcVirtualization', 'value': 'Enabled'}] - mock_BIOSSettingList.sync_node_setting.return_value = ( - create_list, update_list, delete_list, nochange_list) - - self.mock_client.list_bios_settings.return_value = self.bios_attrs - - with task_manager.acquire(self.context, self.node.uuid) as task: - kwsettings = self.mock_client.list_bios_settings() - settings = [{"name": name, - "value": attrib.__dict__['current_value']} - for name, attrib in kwsettings.items()] - self.mock_client.list_bios_settings.reset_mock() - task.driver.bios.cache_bios_settings(task) - - 
self.mock_client.list_bios_settings.assert_called_once_with() - mock_BIOSSettingList.sync_node_setting.assert_called_once_with( - task.context, task.node.id, settings) - - mock_BIOSSettingList.create.assert_not_called() - mock_BIOSSettingList.save.assert_not_called() - mock_BIOSSettingList.delete.assert_not_called() - - def test_cache_bios_settings_fail(self): - exc = drac_exceptions.BaseClientException('boom') - self.mock_client.list_bios_settings.side_effect = exc - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - self.assertRaises(exception.DracOperationError, - task.driver.bios.cache_bios_settings, task) - - @mock.patch.object(deploy_utils, 'get_async_step_return_state', - autospec=True) - @mock.patch.object(deploy_utils, 'set_async_step_flags', autospec=True) - @mock.patch.object(drac_bios.DracWSManBIOS, 'cache_bios_settings', - spec_set=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - def _test_step(self, mock_validate_job_queue, mock_cache_bios_settings, - mock_set_async_step_flags, - mock_get_async_step_return_state): - if self.node.clean_step: - step_data = self.node.clean_step - expected_state = states.CLEANWAIT - mock_get_async_step_return_state.return_value = states.CLEANWAIT - else: - step_data = self.node.deploy_step - expected_state = states.DEPLOYWAIT - mock_get_async_step_return_state.return_value = states.DEPLOYWAIT - - data = step_data['argsinfo'].get('settings', None) - step = step_data['step'] - if step == 'apply_configuration': - attributes = {s['name']: s['value'] for s in data} - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - info = task.node.driver_internal_info - if step == 'factory_reset': - mock_system = None - factory_reset_time_before_reboot = None - - mock_system = mock.Mock() - factory_reset_time_before_reboot = "20200910233024" - mock_system.last_system_inventory_time = "20200910233024" - - self.mock_client.get_system.return_value = mock_system - - ret_state = task.driver.bios.factory_reset(task) - - attrib = {"BIOS Reset To Defaults Requested": "True"} - self.mock_client.set_lifecycle_settings.\ - assert_called_once_with(attrib) - self.mock_client.commit_pending_lifecycle_changes.\ - assert_called_once_with(reboot=True) - self.mock_client.get_system.assert_called_once() - self.assertEqual(factory_reset_time_before_reboot, - info['factory_reset_time_before_reboot']) - - if step == 'apply_configuration': - ret_state = task.driver.bios.apply_configuration(task, data) - - self.mock_client.set_bios_settings.assert_called_once_with( - attributes) - self.mock_client.commit_pending_bios_changes.\ - assert_called_once_with(reboot=True) - job_id = self.mock_client.commit_pending_bios_changes() - self.assertIn(job_id, info['bios_config_job_ids']) - - mock_validate_job_queue.assert_called_once_with(task.node) - mock_set_async_step_flags.assert_called_once_with( - task.node, reboot=True, skip_current_step=True, polling=True) - mock_get_async_step_return_state.assert_called_once_with( - task.node) - self.assertEqual(expected_state, ret_state) - - def test_factory_reset_clean(self): - self.node.clean_step = {'priority': 100, 'interface': 'bios', - 'step': 'factory_reset', 'argsinfo': {}} - self.node.save() - self._test_step() - - def test_factory_reset_deploy(self): - self.node.deploy_step = {'priority': 100, 'interface': 'bios', - 'step': 'factory_reset', 'argsinfo': {}} - self.node.save() - self._test_step() - - def test_apply_configuration_clean(self): - 
settings = [{'name': 'ProcVirtualization', 'value': 'Enabled'}] - self.node.clean_step = {'priority': 100, 'interface': 'bios', - 'step': 'apply_configuration', - 'argsinfo': {'settings': settings}} - self.node.save() - self._test_step() - - def test_apply_configuration_deploy(self): - settings = [{'name': 'ProcVirtualization', 'value': 'Enabled'}] - self.node.deploy_step = {'priority': 100, 'interface': 'bios', - 'step': 'apply_configuration', - 'argsinfo': {'settings': settings}} - self.node.save() - self._test_step() - - def test_apply_conf_set_fail(self): - exc = drac_exceptions.BaseClientException('boom') - self.mock_client.set_bios_settings.side_affect = exc - settings = [{'name': 'ProcVirtualization', 'value': 'Enabled'}] - - with task_manager.acquire(self.context, self.node.uuid) as task: - self.assertRaises(exception.DracOperationError, - task.driver.bios.apply_configuration, task, - settings) - - def test_apply_conf_commit_fail(self): - exc = drac_exceptions.BaseClientException('boom') - self.mock_client.commit_pending_bios_changes.side_affect = exc - settings = [{'name': 'ProcVirtualization', 'value': 'Enabled'}] - - with task_manager.acquire(self.context, self.node.uuid) as task: - self.assertRaises(exception.DracOperationError, - task.driver.bios.apply_configuration, task, - settings) - - def test_factory_reset_set_fail(self): - exc = drac_exceptions.BaseClientException('boom') - self.mock_client.set_lifecycle_settings.side_affect = exc - - with task_manager.acquire(self.context, self.node.uuid) as task: - self.assertRaises(exception.DracOperationError, - task.driver.bios.factory_reset, task) - - def test_factory_reset_commit_fail(self): - exc = drac_exceptions.BaseClientException('boom') - self.mock_client.commit_pending_lifecycle_changes.side_affect = exc - - with task_manager.acquire(self.context, self.node.uuid) as task: - self.assertRaises(exception.DracOperationError, - task.driver.bios.factory_reset, task) - - @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', - autospec=True) - @mock.patch.object(drac_job, 'get_job', spec_set=True, - autospec=True) - def test__check_node_bios_jobs(self, mock_get_job, - mock_notify_conductor_resume_clean): - mock_job = mock.Mock() - mock_job.status = 'Completed' - mock_get_job.return_value = mock_job - - with task_manager.acquire(self.context, self.node.uuid) as task: - driver_internal_info = task.node.driver_internal_info - driver_internal_info['bios_config_job_ids'] = ['123', '789'] - task.node.driver_internal_info = driver_internal_info - task.node.clean_step = {'priority': 100, 'interface': 'bios', - 'step': 'factory_reset', 'argsinfo': {}} - task.node.save() - mock_cache = mock.Mock() - task.driver.bios.cache_bios_settings = mock_cache - - task.driver.bios._check_node_bios_jobs(task) - - self.assertEqual([], task.node.driver_internal_info.get( - 'bios_config_job_ids')) - mock_cache.assert_called_once_with(task) - mock_notify_conductor_resume_clean.assert_called_once_with(task) - - @mock.patch.object(drac_job, 'get_job', spec_set=True, - autospec=True) - def test__check_node_bios_jobs_still_running(self, mock_get_job): - mock_job = mock.Mock() - mock_job.status = 'Running' - mock_get_job.return_value = mock_job - - with task_manager.acquire(self.context, self.node.uuid) as task: - driver_internal_info = task.node.driver_internal_info - driver_internal_info['bios_config_job_ids'] = ['123'] - task.node.driver_internal_info = driver_internal_info - task.node.save() - mock_resume = mock.Mock() - 
task.driver.bios._resume_current_operation = mock_resume - mock_cache = mock.Mock() - task.driver.bios.cache_bios_settings = mock_cache - - task.driver.bios._check_node_bios_jobs(task) - - self.assertEqual(['123'], - task.node.driver_internal_info.get( - 'bios_config_job_ids')) - mock_cache.assert_not_called() - mock_resume.assert_not_called() - - @mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True) - @mock.patch.object(drac_job, 'get_job', spec_set=True, - autospec=True) - def test__check_node_bios_jobs_failed(self, mock_get_job, - mock_cleaning_error_handler): - mock_job = mock.Mock() - mock_job.status = 'Failed' - mock_job.id = '123' - mock_job.message = 'Invalid' - mock_get_job.return_value = mock_job - - with task_manager.acquire(self.context, self.node.uuid) as task: - driver_internal_info = task.node.driver_internal_info - driver_internal_info['bios_config_job_ids'] = ['123'] - task.node.driver_internal_info = driver_internal_info - task.node.clean_step = {'priority': 100, 'interface': 'bios', - 'step': 'factory_reset', 'argsinfo': {}} - task.node.save() - - task.driver.bios._check_node_bios_jobs(task) - - self.assertEqual([], - task.node.driver_internal_info.get( - 'bios_config_job_ids')) - mock_cleaning_error_handler.assert_called_once_with( - task, mock.ANY, "Failed config job: 123. Message: 'Invalid'.") - - @mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True) - @mock.patch.object(drac_job, 'get_job', spec_set=True, - autospec=True) - def test__check_node_bios_jobs_completed_with_errors( - self, mock_get_job, mock_cleaning_error_handler): - mock_job = mock.Mock() - mock_job.status = 'Completed with Errors' - mock_job.id = '123' - mock_job.message = 'PR31: Completed with Errors' - mock_get_job.return_value = mock_job - - with task_manager.acquire(self.context, self.node.uuid) as task: - driver_internal_info = task.node.driver_internal_info - driver_internal_info['bios_config_job_ids'] = ['123'] - task.node.driver_internal_info = driver_internal_info - task.node.clean_step = {'priority': 100, 'interface': 'bios', - 'step': 'factory_reset', 'argsinfo': {}} - task.node.save() - - task.driver.bios._check_node_bios_jobs(task) - - self.assertEqual([], - task.node.driver_internal_info.get( - 'bios_config_job_ids')) - mock_cleaning_error_handler.assert_called_once_with( - task, mock.ANY, "Failed config job: 123. 
Message: " - "'PR31: Completed with Errors'.") - - def test__check_last_system_inventory_changed_different_inventory_time( - self): - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - - driver_internal_info = task.node.driver_internal_info - driver_internal_info["factory_reset_time_before_reboot"] = \ - "20200910233024" - current_time = str(timeutils.utcnow(True)) - driver_internal_info["factory_reset_time"] = current_time - task.node.driver_internal_info = driver_internal_info - task.node.save() - mock_system = mock.Mock() - mock_system.last_system_inventory_time =\ - "20200910233523" - self.mock_client.get_system.return_value = mock_system - mock_resume = mock.Mock() - task.driver.bios._resume_current_operation = mock_resume - mock_cache = mock.Mock() - task.driver.bios.cache_bios_settings = mock_cache - - task.driver.bios._check_last_system_inventory_changed(task) - - self.assertIsNone(task.node.driver_internal_info.get( - 'factory_reset_time_before_reboot')) - self.assertIsNone( - task.node.driver_internal_info.get('factory_reset_time')) - mock_cache.assert_called_once_with(task) - mock_resume.assert_called_once_with(task) - - def test__check_last_system_inventory_changed_same_inventory_time(self): - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - - driver_internal_info = task.node.driver_internal_info - driver_internal_info['factory_reset_time_before_reboot'] = \ - "20200910233024" - current_time = str(timeutils.utcnow(True)) - driver_internal_info['factory_reset_time'] = current_time - task.node.driver_internal_info = driver_internal_info - task.node.save() - mock_system = mock.Mock() - mock_system.last_system_inventory_time =\ - "20200910233024" - self.mock_client.get_system.return_value = mock_system - - task.driver.bios._check_last_system_inventory_changed(task) - - self.assertIsNotNone( - task.node.driver_internal_info.get('factory_reset_time')) - self.assertEqual(current_time, - task.node.driver_internal_info.get( - 'factory_reset_time')) - self.assertEqual("20200910233024", - task.node.driver_internal_info.get( - 'factory_reset_time_before_reboot')) - - def test__check_last_system_inventory_changed_same_inventory_time_timeout( - self): - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - - driver_internal_info = task.node.driver_internal_info - driver_internal_info['factory_reset_time_before_reboot'] = \ - "20200910233024" - driver_internal_info['factory_reset_time'] = \ - '2020-09-25 15:02:57.903318+00:00' - task.node.driver_internal_info = driver_internal_info - task.node.save() - mock_system = mock.Mock() - mock_system.last_system_inventory_time =\ - "20200910233024" - self.mock_client.get_system.return_value = mock_system - mock_failed = mock.Mock() - task.driver.bios._set_failed = mock_failed - - task.driver.bios._check_last_system_inventory_changed(task) - - self.assertIsNone(task.node.driver_internal_info.get( - 'factory_reset_time_before_reboot')) - self.assertIsNone( - task.node.driver_internal_info.get('factory_reset_time')) - fail = ("BIOS factory reset was not completed within 600 " - "seconds, unable to cache updated bios setting") - mock_failed.assert_called_once_with(task, fail) - - @mock.patch.object(task_manager, 'acquire', autospec=True) - def test__query_bios_config_job_status(self, mock_acquire): - driver_internal_info = {'bios_config_job_ids': ['42'], - 'factory_reset_time_before_reboot': - "20200910233024"} - self.node.driver_internal_info = 
driver_internal_info - self.node.save() - mock_manager = mock.Mock() - node_list = [(self.node.uuid, 'idrac', '', - driver_internal_info)] - mock_manager.iter_nodes.return_value = node_list - # mock task_manager.acquire - task = mock.Mock(node=self.node, driver=mock.Mock(bios=self.bios)) - mock_acquire.return_value = mock.MagicMock( - __enter__=mock.MagicMock(return_value=task)) - self.bios._check_node_bios_jobs = mock.Mock() - self.bios._check_last_system_inventory_changed = mock.Mock() - - self.bios._query_bios_config_job_status(mock_manager, - self.context) - - self.bios._check_node_bios_jobs.assert_called_once_with(task) - self.bios._check_last_system_inventory_changed.assert_called_once_with( - task) - - @mock.patch.object(task_manager, 'acquire', autospec=True) - def test__query_bios_config_job_status_no_config_jobs(self, - mock_acquire): - # mock manager - mock_manager = mock.Mock() - node_list = [(self.node.uuid, 'idrac', '', {})] - mock_manager.iter_nodes.return_value = node_list - # mock task_manager.acquire - task = mock.Mock(node=self.node, driver=mock.Mock(bios=self.bios)) - mock_acquire.return_value = mock.MagicMock( - __enter__=mock.MagicMock(return_value=task)) - self.bios._check_node_bios_jobs = mock.Mock() - self.bios._check_last_system_inventory_changed = mock.Mock() - - self.bios._query_bios_config_job_status(mock_manager, - None) - - self.bios._check_node_bios_jobs.assert_not_called() - self.bios._check_last_system_inventory_changed.assert_not_called() - - @mock.patch.object(task_manager, 'acquire', autospec=True) - def test__query_bios_config_job_status_no_driver(self, - mock_acquire): - driver_internal_info = {'bios_config_job_ids': ['42'], - 'factory_reset_time_before_reboot': - "20200910233024"} - self.node.driver_internal_info = driver_internal_info - self.node.save() - mock_manager = mock.Mock() - node_list = [(self.node.uuid, '', '', driver_internal_info)] - mock_manager.iter_nodes.return_value = node_list - # mock task_manager.acquire - task = mock.Mock(node=self.node, driver=mock.Mock(bios="")) - mock_acquire.return_value = mock.MagicMock( - __enter__=mock.MagicMock(return_value=task)) - self.bios._check_node_bios_jobs = mock.Mock() - self.bios._check_last_system_inventory_changed = mock.Mock() - - self.bios._query_bios_config_job_status(mock_manager, - None) - - self.bios._check_node_bios_jobs.assert_not_called() - self.bios._check_last_system_inventory_changed.assert_not_called() - - -class DracBIOSConfigurationTestCase(test_utils.BaseDracTest): - - def setUp(self): - super(DracBIOSConfigurationTestCase, self).setUp() - self.node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - - patch_get_drac_client = mock.patch.object( - drac_common, 'get_drac_client', spec_set=True, autospec=True) - mock_get_drac_client = patch_get_drac_client.start() - self.mock_client = mock.Mock() - mock_get_drac_client.return_value = self.mock_client - self.addCleanup(patch_get_drac_client.stop) - - proc_virt_attr = { - 'current_value': 'Enabled', - 'pending_value': None, - 'read_only': False, - 'possible_values': ['Enabled', 'Disabled']} - mock_proc_virt_attr = mock.NonCallableMock(spec=[], **proc_virt_attr) - mock_proc_virt_attr.name = 'ProcVirtualization' - self.bios_attrs = {'ProcVirtualization': mock_proc_virt_attr} - - def test_get_config(self): - self.mock_client.list_bios_settings.return_value = self.bios_attrs - - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - bios_config = 
task.driver.vendor.get_bios_config(task) - - self.mock_client.list_bios_settings.assert_called_once_with() - self.assertIn('ProcVirtualization', bios_config) - - def test_get_config_fail(self): - exc = drac_exceptions.BaseClientException('boom') - self.mock_client.list_bios_settings.side_effect = exc - - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - self.assertRaises(exception.DracOperationError, - task.driver.vendor.get_bios_config, task) - - self.mock_client.list_bios_settings.assert_called_once_with() - - def test_set_config(self): - self.mock_client.list_jobs.return_value = [] - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - task.driver.vendor.set_bios_config(task, - ProcVirtualization='Enabled') - - self.mock_client.list_jobs.assert_called_once_with( - only_unfinished=True) - self.mock_client.set_bios_settings.assert_called_once_with( - {'ProcVirtualization': 'Enabled'}) - - def test_set_config_fail(self): - self.mock_client.list_jobs.return_value = [] - exc = drac_exceptions.BaseClientException('boom') - self.mock_client.set_bios_settings.side_effect = exc - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - self.assertRaises(exception.DracOperationError, - task.driver.vendor.set_bios_config, task, - ProcVirtualization='Enabled') - - self.mock_client.set_bios_settings.assert_called_once_with( - {'ProcVirtualization': 'Enabled'}) - - def test_commit_config(self): - self.mock_client.list_jobs.return_value = [] - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - task.driver.vendor.commit_bios_config(task) - - self.mock_client.list_jobs.assert_called_once_with( - only_unfinished=True) - self.mock_client.commit_pending_bios_changes.assert_called_once_with( - False) - - def test_commit_config_with_reboot(self): - self.mock_client.list_jobs.return_value = [] - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - task.driver.vendor.commit_bios_config(task, reboot=True) - - self.mock_client.list_jobs.assert_called_once_with( - only_unfinished=True) - self.mock_client.commit_pending_bios_changes.assert_called_once_with( - True) - - def test_commit_config_fail(self): - self.mock_client.list_jobs.return_value = [] - exc = drac_exceptions.BaseClientException('boom') - self.mock_client.commit_pending_bios_changes.side_effect = exc - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - self.assertRaises(exception.DracOperationError, - task.driver.vendor.commit_bios_config, task) - - self.mock_client.list_jobs.assert_called_once_with( - only_unfinished=True) - self.mock_client.commit_pending_bios_changes.assert_called_once_with( - False) - - def test_abandon_config(self): - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - task.driver.vendor.abandon_bios_config(task) - - self.mock_client.abandon_pending_bios_changes.assert_called_once_with() - - def test_abandon_config_fail(self): - exc = drac_exceptions.BaseClientException('boom') - self.mock_client.abandon_pending_bios_changes.side_effect = exc - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - self.assertRaises(exception.DracOperationError, - task.driver.vendor.abandon_bios_config, task) - - self.mock_client.abandon_pending_bios_changes.assert_called_once_with() diff --git a/ironic/tests/unit/drivers/modules/drac/test_common.py 
b/ironic/tests/unit/drivers/modules/drac/test_common.py deleted file mode 100644 index ba28141879..0000000000 --- a/ironic/tests/unit/drivers/modules/drac/test_common.py +++ /dev/null @@ -1,125 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Test class for common methods used by DRAC modules. -""" - -from unittest import mock - -import dracclient.client - -from ironic.common import exception -from ironic.drivers.modules.drac import common as drac_common -from ironic.tests.unit.db import utils as db_utils -from ironic.tests.unit.drivers.modules.drac import utils as test_utils -from ironic.tests.unit.objects import utils as obj_utils - -INFO_DICT = db_utils.get_test_drac_info() - - -class DracCommonMethodsTestCase(test_utils.BaseDracTest): - def test_parse_driver_info(self): - node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - info = drac_common.parse_driver_info(node) - self.assertEqual(INFO_DICT['drac_address'], info['drac_address']) - self.assertEqual(INFO_DICT['drac_port'], info['drac_port']) - self.assertEqual(INFO_DICT['drac_path'], info['drac_path']) - self.assertEqual(INFO_DICT['drac_protocol'], info['drac_protocol']) - self.assertEqual(INFO_DICT['drac_username'], info['drac_username']) - self.assertEqual(INFO_DICT['drac_password'], info['drac_password']) - - def test_parse_driver_info_missing_host(self): - node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - del node.driver_info['drac_address'] - self.assertRaises(exception.InvalidParameterValue, - drac_common.parse_driver_info, node) - - def test_parse_driver_info_missing_port(self): - node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - del node.driver_info['drac_port'] - - info = drac_common.parse_driver_info(node) - self.assertEqual(443, info['drac_port']) - - def test_parse_driver_info_invalid_port(self): - node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - node.driver_info['drac_port'] = 'foo' - self.assertRaises(exception.InvalidParameterValue, - drac_common.parse_driver_info, node) - - def test_parse_driver_info_missing_path(self): - node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - del node.driver_info['drac_path'] - - info = drac_common.parse_driver_info(node) - self.assertEqual('/wsman', info['drac_path']) - - def test_parse_driver_info_missing_protocol(self): - node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - del node.driver_info['drac_protocol'] - - info = drac_common.parse_driver_info(node) - self.assertEqual('https', info['drac_protocol']) - - def test_parse_driver_info_invalid_protocol(self): - node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - node.driver_info['drac_protocol'] = 'foo' - - self.assertRaises(exception.InvalidParameterValue, - drac_common.parse_driver_info, node) - - def 
test_parse_driver_info_missing_username(self): - node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - del node.driver_info['drac_username'] - self.assertRaises(exception.InvalidParameterValue, - drac_common.parse_driver_info, node) - - def test_parse_driver_info_missing_password(self): - node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - del node.driver_info['drac_password'] - self.assertRaises(exception.InvalidParameterValue, - drac_common.parse_driver_info, node) - - def test_get_drac_client(self): - if not mock._is_instance_mock(dracclient.client): - mock.patch.object(dracclient.client, 'DRACClient', - autospec=True).start() - mock_dracclient = dracclient.client.DRACClient - expected_call = mock.call('1.2.3.4', 'admin', 'fake', 443, '/wsman', - 'https') - node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - - drac_common.get_drac_client(node) - - self.assertEqual(mock_dracclient.mock_calls, [expected_call]) diff --git a/ironic/tests/unit/drivers/modules/drac/test_inspect.py b/ironic/tests/unit/drivers/modules/drac/test_inspect.py index d98edaa191..41dfd8b330 100644 --- a/ironic/tests/unit/drivers/modules/drac/test_inspect.py +++ b/ironic/tests/unit/drivers/modules/drac/test_inspect.py @@ -17,512 +17,21 @@ Test class for DRAC inspection interface from unittest import mock -from dracclient import exceptions as drac_exceptions from oslo_utils import units import sushy -from ironic.common import exception from ironic.common import states from ironic.conductor import task_manager -from ironic.drivers.modules.drac import common as drac_common from ironic.drivers.modules.drac import inspect as drac_inspect from ironic.drivers.modules import inspect_utils from ironic.drivers.modules.redfish import inspect as redfish_inspect from ironic.drivers.modules.redfish import utils as redfish_utils -from ironic import objects from ironic.tests.unit.drivers.modules.drac import utils as test_utils from ironic.tests.unit.objects import utils as obj_utils INFO_DICT = test_utils.INFO_DICT -class DracInspectionTestCase(test_utils.BaseDracTest): - - def setUp(self): - super(DracInspectionTestCase, self).setUp() - self.node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - memory = [{'id': 'DIMM.Socket.A1', - 'size_mb': 16384, - 'speed': 2133, - 'manufacturer': 'Samsung', - 'model': 'DDR4 DIMM', - 'state': 'ok'}, - {'id': 'DIMM.Socket.B1', - 'size_mb': 16384, - 'speed': 2133, - 'manufacturer': 'Samsung', - 'model': 'DDR4 DIMM', - 'state': 'ok'}] - cpus = [{'id': 'CPU.Socket.1', - 'cores': 6, - 'speed': 2400, - 'model': 'Intel(R) Xeon(R) CPU E5-2620 v3 @ 2.40GHz', - 'state': 'ok', - 'ht_enabled': True, - 'turbo_enabled': True, - 'vt_enabled': True, - 'arch64': True}, - {'id': 'CPU.Socket.2', - 'cores': 6, - 'speed': 2400, - 'model': 'Intel(R) Xeon(R) CPU E5-2620 v3 @ 2.40GHz', - 'state': 'ok', - 'ht_enabled': False, - 'turbo_enabled': True, - 'vt_enabled': True, - 'arch64': True}] - virtual_disks = [ - {'id': 'Disk.Virtual.0:RAID.Integrated.1-1', - 'name': 'disk 0', - 'description': 'Virtual Disk 0 on Integrated RAID Controller 1', - 'controller': 'RAID.Integrated.1-1', - 'raid_level': '1', - 'size_mb': 1143552, - 'state': 'ok', - 'raid_state': 'online', - 'span_depth': 1, - 'span_length': 2, - 'pending_operations': None}] - physical_disks = [ - {'id': 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'description': ('Disk 1 in Backplane 1 of ' - 
'Integrated RAID Controller 1'), - 'controller': 'RAID.Integrated.1-1', - 'manufacturer': 'SEAGATE', - 'model': 'ST600MM0006', - 'media_type': 'hdd', - 'interface_type': 'sas', - 'size_mb': 571776, - 'free_size_mb': 571776, - 'serial_number': 'S0M3EY2Z', - 'firmware_version': 'LS0A', - 'state': 'ok', - 'raid_state': 'ready'}, - {'id': 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'description': ('Disk 1 in Backplane 1 of ' - 'Integrated RAID Controller 1'), - 'controller': 'RAID.Integrated.1-1', - 'manufacturer': 'SEAGATE', - 'model': 'ST600MM0006', - 'media_type': 'hdd', - 'interface_type': 'sas', - 'size_mb': 285888, - 'free_size_mb': 285888, - 'serial_number': 'S0M3EY2Z', - 'firmware_version': 'LS0A', - 'state': 'ok', - 'raid_state': 'ready'}] - nics = [ - {'id': 'NIC.Embedded.1-1-1', - 'mac': 'B0:83:FE:C6:6F:A1', - 'model': 'Broadcom Gigabit Ethernet BCM5720 - B0:83:FE:C6:6F:A1', - 'speed': '1000 Mbps', - 'duplex': 'full duplex', - 'media_type': 'Base T'}, - {'id': 'NIC.Embedded.2-1-1', - 'mac': 'B0:83:FE:C6:6F:A2', - 'model': 'Broadcom Gigabit Ethernet BCM5720 - B0:83:FE:C6:6F:A2', - 'speed': '1000 Mbps', - 'duplex': 'full duplex', - 'media_type': 'Base T'}] - bios_boot_settings = {'BootMode': {'current_value': 'Bios'}} - uefi_boot_settings = {'BootMode': {'current_value': 'Uefi'}, - 'PxeDev1EnDis': {'current_value': 'Enabled'}, - 'PxeDev2EnDis': {'current_value': 'Disabled'}, - 'PxeDev3EnDis': {'current_value': 'Disabled'}, - 'PxeDev4EnDis': {'current_value': 'Disabled'}, - 'PxeDev1Interface': { - 'current_value': 'NIC.Embedded.1-1-1'}, - 'PxeDev2Interface': None, - 'PxeDev3Interface': None, - 'PxeDev4Interface': None} - nic_settings = {'LegacyBootProto': {'current_value': 'PXE'}, - 'FQDD': 'NIC.Embedded.1-1-1'} - video_controllers = [ - {'id': 'Video.Embedded.1-1', - 'description': 'Integrated Matrox G200eW3 Graphics Controller', - 'function_number': 0, - 'manufacturer': 'Matrox Electronics Systems Ltd.', - 'pci_device_id': '0536', - 'pci_vendor_id': '102B', - 'pci_subdevice_id': '0737', - 'pci_subvendor_id': '1028'}, - {'id': 'Video.Slot.7-1', - 'description': 'TU104GL [Tesla T4]', - 'function_number': 0, - 'manufacturer': 'NVIDIA Corporation', - 'pci_device_id': '1EB8', - 'pci_vendor_id': '10DE', - 'pci_subdevice_id': '12A2', - 'pci_subvendor_id': '10DE'}] - - self.memory = [test_utils.dict_to_namedtuple(values=m) for m in memory] - self.cpus = [test_utils.dict_to_namedtuple(values=c) for c in cpus] - self.virtual_disks = [test_utils.dict_to_namedtuple(values=vd) - for vd in virtual_disks] - self.physical_disks = [test_utils.dict_to_namedtuple(values=pd) - for pd in physical_disks] - self.nics = [test_utils.dict_to_namedtuple(values=n) for n in nics] - self.bios_boot_settings = test_utils.dict_of_object(bios_boot_settings) - self.uefi_boot_settings = test_utils.dict_of_object(uefi_boot_settings) - self.nic_settings = test_utils.dict_of_object(nic_settings) - self.video_controllers = [test_utils.dict_to_namedtuple(values=vc) - for vc in video_controllers] - - def test_get_properties(self): - expected = drac_common.COMMON_PROPERTIES - driver = drac_inspect.DracInspect() - self.assertEqual(expected, driver.get_properties()) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(objects.Port, 'create', spec_set=True, autospec=True) - def test_inspect_hardware(self, mock_port_create, mock_get_drac_client): - expected_node_properties = { - 'memory_mb': 32768, - 'local_gb': 1116, - 'cpu_arch': 'x86_64', - 'capabilities': 
'boot_mode:uefi,pci_gpu_devices:1'} - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_memory.return_value = self.memory - mock_client.list_cpus.return_value = self.cpus - mock_client.list_virtual_disks.return_value = self.virtual_disks - mock_client.list_nics.return_value = self.nics - mock_client.list_bios_settings.return_value = self.uefi_boot_settings - mock_client.list_video_controllers.return_value = \ - self.video_controllers - - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - return_value = task.driver.inspect.inspect_hardware(task) - - self.node.refresh() - self.assertEqual(expected_node_properties, self.node.properties) - self.assertEqual(states.MANAGEABLE, return_value) - self.assertEqual(2, mock_port_create.call_count) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(objects.Port, 'create', spec_set=True, autospec=True) - def test_inspect_hardware_fail(self, mock_port_create, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_memory.return_value = self.memory - mock_client.list_cpus.return_value = self.cpus - mock_client.list_virtual_disks.side_effect = ( - drac_exceptions.BaseClientException('boom')) - mock_client.list_bios_settings.return_value = self.bios_boot_settings - mock_client.list_video_controllers.return_value = \ - self.video_controllers - - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - self.assertRaises(exception.HardwareInspectionFailure, - task.driver.inspect.inspect_hardware, task) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(objects.Port, 'create', spec_set=True, autospec=True) - def test_inspect_hardware_no_virtual_disk(self, mock_port_create, - mock_get_drac_client): - expected_node_properties = { - 'memory_mb': 32768, - 'local_gb': 279, - 'cpu_arch': 'x86_64', - 'capabilities': 'boot_mode:uefi,pci_gpu_devices:1'} - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_memory.return_value = self.memory - mock_client.list_cpus.return_value = self.cpus - mock_client.list_virtual_disks.return_value = [] - mock_client.list_physical_disks.return_value = self.physical_disks - mock_client.list_nics.return_value = self.nics - mock_client.list_bios_settings.return_value = self.uefi_boot_settings - mock_client.list_video_controllers.return_value = \ - self.video_controllers - - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - return_value = task.driver.inspect.inspect_hardware(task) - - self.node.refresh() - self.assertEqual(expected_node_properties, self.node.properties) - self.assertEqual(states.MANAGEABLE, return_value) - self.assertEqual(2, mock_port_create.call_count) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(objects.Port, 'create', spec_set=True, autospec=True) - def test_inspect_hardware_no_cpu( - self, mock_port_create, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_memory.return_value = self.memory - mock_client.list_cpus.return_value = [] - mock_client.list_virtual_disks.return_value = [] - mock_client.list_physical_disks.return_value = self.physical_disks - mock_client.list_nics.return_value = self.nics - 
mock_client.list_bios_settings.return_value = self.uefi_boot_settings - mock_client.list_video_controllers.return_value = \ - self.video_controllers - - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - self.assertRaises(exception.HardwareInspectionFailure, - task.driver.inspect.inspect_hardware, task) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(objects.Port, 'create', spec_set=True, autospec=True) - def test_inspect_hardware_no_supported_gpu(self, mock_port_create, - mock_get_drac_client): - controllers = [ - {'id': 'Video.Embedded.1-1', - 'description': 'Integrated Matrox G200eW3 Graphics Controller', - 'function_number': 0, - 'manufacturer': 'Matrox Electronics Systems Ltd.', - 'pci_device_id': '0536', - 'pci_vendor_id': '102B', - 'pci_subdevice_id': '0737', - 'pci_subvendor_id': '1028'}, - {'id': 'Video.Slot.7-1', - 'description': 'GV100 [TITAN V]', - 'function_number': 0, - 'manufacturer': 'NVIDIA Corporation', - 'pci_device_id': '1D81', - 'pci_vendor_id': '10DE', - 'pci_subdevice_id': '1214', - 'pci_subvendor_id': '10DE'}] - - expected_node_properties = { - 'memory_mb': 32768, - 'local_gb': 279, - 'cpu_arch': 'x86_64', - 'capabilities': 'boot_mode:uefi,pci_gpu_devices:0'} - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_memory.return_value = self.memory - mock_client.list_cpus.return_value = self.cpus - mock_client.list_virtual_disks.return_value = [] - mock_client.list_physical_disks.return_value = self.physical_disks - mock_client.list_nics.return_value = self.nics - mock_client.list_bios_settings.return_value = self.uefi_boot_settings - video_controllers = [test_utils.dict_to_namedtuple(values=vc) - for vc in controllers] - mock_client.list_video_controllers.return_value = video_controllers - - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - return_value = task.driver.inspect.inspect_hardware(task) - - self.node.refresh() - self.assertEqual(expected_node_properties, self.node.properties) - self.assertEqual(states.MANAGEABLE, return_value) - self.assertEqual(2, mock_port_create.call_count) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(objects.Port, 'create', spec_set=True, autospec=True) - def test_inspect_hardware_multiple_supported_gpu(self, mock_port_create, - mock_get_drac_client): - controllers = [ - {'id': 'Video.Slot.7-1', - 'description': 'TU104GL [Tesla T4]', - 'function_number': 0, - 'manufacturer': 'NVIDIA Corporation', - 'pci_device_id': '1EB8', - 'pci_vendor_id': '10DE', - 'pci_subdevice_id': '12A2', - 'pci_subvendor_id': '10DE'}, - {'id': 'Video.Slot.8-1', - 'description': 'GV100GL [Tesla V100 PCIe 16GB]', - 'function_number': 0, - 'manufacturer': 'NVIDIA Corporation', - 'pci_device_id': '1DB4', - 'pci_vendor_id': '10DE', - 'pci_subdevice_id': '1214', - 'pci_subvendor_id': '10DE'}] - - expected_node_properties = { - 'memory_mb': 32768, - 'local_gb': 279, - 'cpu_arch': 'x86_64', - 'capabilities': 'boot_mode:uefi,pci_gpu_devices:2'} - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_memory.return_value = self.memory - mock_client.list_cpus.return_value = self.cpus - mock_client.list_virtual_disks.return_value = [] - mock_client.list_physical_disks.return_value = self.physical_disks - mock_client.list_nics.return_value = self.nics - mock_client.list_bios_settings.return_value = 
self.uefi_boot_settings - video_controllers = [test_utils.dict_to_namedtuple(values=vc) - for vc in controllers] - mock_client.list_video_controllers.return_value = video_controllers - - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - return_value = task.driver.inspect.inspect_hardware(task) - - self.node.refresh() - self.assertEqual(expected_node_properties, self.node.properties) - self.assertEqual(states.MANAGEABLE, return_value) - self.assertEqual(2, mock_port_create.call_count) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(objects.Port, 'create', spec_set=True, autospec=True) - def test_inspect_hardware_no_gpu(self, mock_port_create, - mock_get_drac_client): - expected_node_properties = { - 'memory_mb': 32768, - 'local_gb': 279, - 'cpu_arch': 'x86_64', - 'capabilities': 'boot_mode:uefi,pci_gpu_devices:0'} - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_memory.return_value = self.memory - mock_client.list_cpus.return_value = self.cpus - mock_client.list_virtual_disks.return_value = [] - mock_client.list_physical_disks.return_value = self.physical_disks - mock_client.list_nics.return_value = self.nics - mock_client.list_bios_settings.return_value = self.uefi_boot_settings - mock_client.list_video_controllers.return_value = [] - - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - return_value = task.driver.inspect.inspect_hardware(task) - - self.node.refresh() - self.assertEqual(expected_node_properties, self.node.properties) - self.assertEqual(states.MANAGEABLE, return_value) - self.assertEqual(2, mock_port_create.call_count) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(objects.Port, 'create', spec_set=True, autospec=True) - def test_inspect_hardware_with_existing_ports(self, mock_port_create, - mock_get_drac_client): - expected_node_properties = { - 'memory_mb': 32768, - 'local_gb': 1116, - 'cpu_arch': 'x86_64', - 'capabilities': 'boot_mode:uefi,pci_gpu_devices:1'} - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_memory.return_value = self.memory - mock_client.list_cpus.return_value = self.cpus - mock_client.list_virtual_disks.return_value = self.virtual_disks - mock_client.list_nics.return_value = self.nics - mock_client.list_bios_settings.return_value = self.uefi_boot_settings - mock_client.list_video_controllers.return_value = \ - self.video_controllers - - mock_port_create.side_effect = exception.MACAlreadyExists("boom") - - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - return_value = task.driver.inspect.inspect_hardware(task) - - self.node.refresh() - self.assertEqual(expected_node_properties, self.node.properties) - self.assertEqual(states.MANAGEABLE, return_value) - self.assertEqual(2, mock_port_create.call_count) - - def test__guess_root_disk(self): - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - root_disk = task.driver.inspect._guess_root_disk( - self.physical_disks) - - self.assertEqual(285888, root_disk.size_mb) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - def test__get_pxe_dev_nics_with_UEFI_boot_mode(self, mock_get_drac_client): - expected_pxe_nic = self.uefi_boot_settings[ - 'PxeDev1Interface'].current_value - mock_client = mock.Mock() - mock_get_drac_client.return_value = 
mock_client - mock_client.list_bios_settings.return_value = self.uefi_boot_settings - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - pxe_dev_nics = task.driver.inspect._get_pxe_dev_nics( - mock_client, self.nics, self.node) - - self.assertEqual(expected_pxe_nic, pxe_dev_nics[0]) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - def test__get_pxe_dev_nics_with_BIOS_boot_mode(self, mock_get_drac_client): - expected_pxe_nic = self.nic_settings['FQDD'] - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_bios_settings.return_value = self.bios_boot_settings - mock_client.list_nic_settings.return_value = self.nic_settings - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - pxe_dev_nics = task.driver.inspect._get_pxe_dev_nics( - mock_client, self.nics, self.node) - - self.assertEqual(expected_pxe_nic, pxe_dev_nics[0]) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - def test__get_pxe_dev_nics_list_boot_setting_failure(self, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_bios_settings.side_effect = ( - drac_exceptions.BaseClientException('foo')) - - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - self.assertRaises(exception.HardwareInspectionFailure, - task.driver.inspect._get_pxe_dev_nics, - mock_client, - self.nics, - self.node) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - def test__get_pxe_dev_nics_list_nic_setting_failure(self, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_bios_settings.return_value = self.bios_boot_settings - mock_client.list_nic_settings.side_effect = ( - drac_exceptions.BaseClientException('bar')) - - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - self.assertRaises(exception.HardwareInspectionFailure, - task.driver.inspect._get_pxe_dev_nics, - mock_client, - self.nics, - self.node) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - def test__get_pxe_dev_nics_with_empty_list(self, mock_get_drac_client): - expected_pxe_nic = [] - nic_setting = [] - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_bios_settings.return_value = self.bios_boot_settings - mock_client.list_nic_settings.return_value = nic_setting - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - pxe_dev_nics = task.driver.inspect._get_pxe_dev_nics( - mock_client, self.nics, self.node) - - self.assertEqual(expected_pxe_nic, pxe_dev_nics) - - class DracRedfishInspectionTestCase(test_utils.BaseDracTest): def setUp(self): super(DracRedfishInspectionTestCase, self).setUp() diff --git a/ironic/tests/unit/drivers/modules/drac/test_job.py b/ironic/tests/unit/drivers/modules/drac/test_job.py deleted file mode 100644 index a0ea526cbc..0000000000 --- a/ironic/tests/unit/drivers/modules/drac/test_job.py +++ /dev/null @@ -1,175 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Test class for DRAC job specific methods -""" - -from unittest import mock - -from dracclient import exceptions as drac_exceptions - -from ironic.common import exception -from ironic.conductor import task_manager -from ironic.drivers.modules.drac import common as drac_common -from ironic.drivers.modules.drac import job as drac_job -from ironic.tests.unit.drivers.modules.drac import utils as test_utils -from ironic.tests.unit.objects import utils as obj_utils - -INFO_DICT = test_utils.INFO_DICT - - -@mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) -class DracJobTestCase(test_utils.BaseDracTest): - - def setUp(self): - super(DracJobTestCase, self).setUp() - self.node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - self.job_dict = { - 'id': 'JID_001436912645', - 'name': 'ConfigBIOS:BIOS.Setup.1-1', - 'start_time': '00000101000000', - 'until_time': 'TIME_NA', - 'message': 'Job in progress', - 'status': 'Running', - 'percent_complete': 34} - self.job = test_utils.make_job(self.job_dict) - - def test_get_job(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.get_job.return_value = self.job - - job = drac_job.get_job(self.node, 'foo') - - mock_client.get_job.assert_called_once_with('foo') - self.assertEqual(self.job, job) - - def test_get_job_fail(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - exc = exception.DracOperationError('boom') - mock_client.get_job.side_effect = exc - - self.assertRaises(exception.DracOperationError, - drac_job.get_job, self.node, 'foo') - - def test_list_unfinished_jobs(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_jobs.return_value = [self.job] - - jobs = drac_job.list_unfinished_jobs(self.node) - - mock_client.list_jobs.assert_called_once_with(only_unfinished=True) - self.assertEqual([self.job], jobs) - - def test_list_unfinished_jobs_fail(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - exc = exception.DracOperationError('boom') - mock_client.list_jobs.side_effect = exc - - self.assertRaises(exception.DracOperationError, - drac_job.list_unfinished_jobs, self.node) - - def test_validate_job_queue(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_jobs.return_value = [] - - drac_job.validate_job_queue(self.node) - - mock_client.list_jobs.assert_called_once_with(only_unfinished=True) - - def test_validate_job_queue_fail(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - exc = drac_exceptions.BaseClientException('boom') - mock_client.list_jobs.side_effect = exc - - self.assertRaises(exception.DracOperationError, - drac_job.validate_job_queue, self.node) - - def test_validate_job_queue_invalid(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = 
mock_client - mock_client.list_jobs.return_value = [self.job] - - self.assertRaises(exception.DracOperationError, - drac_job.validate_job_queue, self.node) - - def test_validate_job_queue_name_prefix(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_jobs.return_value = [self.job] - - drac_job.validate_job_queue(self.node, name_prefix='Fake') - - mock_client.list_jobs.assert_called_once_with(only_unfinished=True) - - def test_validate_job_queue_name_prefix_invalid(self, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_jobs.return_value = [self.job] - - self.assertRaises(exception.DracOperationError, - drac_job.validate_job_queue, self.node, - name_prefix='ConfigBIOS') - - -@mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) -class DracVendorPassthruJobTestCase(test_utils.BaseDracTest): - - def setUp(self): - super(DracVendorPassthruJobTestCase, self).setUp() - self.node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - self.job_dict = { - 'id': 'JID_001436912645', - 'name': 'ConfigBIOS:BIOS.Setup.1-1', - 'start_time': '00000101000000', - 'until_time': 'TIME_NA', - 'message': 'Job in progress', - 'status': 'Running', - 'percent_complete': 34} - self.job = test_utils.make_job(self.job_dict) - - def test_list_unfinished_jobs(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_jobs.return_value = [self.job] - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - resp = task.driver.vendor.list_unfinished_jobs(task) - - mock_client.list_jobs.assert_called_once_with(only_unfinished=True) - self.assertEqual([self.job_dict], resp['unfinished_jobs']) - - def test_list_unfinished_jobs_fail(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - exc = exception.DracOperationError('boom') - mock_client.list_jobs.side_effect = exc - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - self.assertRaises(exception.DracOperationError, - task.driver.vendor.list_unfinished_jobs, task) diff --git a/ironic/tests/unit/drivers/modules/drac/test_management.py b/ironic/tests/unit/drivers/modules/drac/test_management.py index 6d693fcf0b..9b4d22369d 100644 --- a/ironic/tests/unit/drivers/modules/drac/test_management.py +++ b/ironic/tests/unit/drivers/modules/drac/test_management.py @@ -23,7 +23,6 @@ Test class for DRAC management interface import json from unittest import mock -from oslo_utils import importutils import sushy import ironic.common.boot_devices @@ -34,808 +33,16 @@ from ironic.conductor import periodics from ironic.conductor import task_manager from ironic.conductor import utils as manager_utils from ironic.drivers.modules import deploy_utils -from ironic.drivers.modules.drac import common as drac_common -from ironic.drivers.modules.drac import job as drac_job from ironic.drivers.modules.drac import management as drac_mgmt from ironic.drivers.modules.drac import utils as drac_utils from ironic.drivers.modules.redfish import utils as redfish_utils from ironic.tests.unit.drivers.modules.drac import utils as test_utils from ironic.tests.unit.objects import utils as obj_utils -dracclient_exceptions = importutils.try_import('dracclient.exceptions') INFO_DICT = test_utils.INFO_DICT 
-@mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) -class DracManagementInternalMethodsTestCase(test_utils.BaseDracTest): - - def boot_modes(self, *next_modes): - modes = [ - {'id': 'IPL', 'name': 'BootSeq', - 'is_current': True, 'is_next': False}, - {'id': 'OneTime', 'name': 'OneTimeBootMode', - 'is_current': False, 'is_next': False}] - for mode in modes: - if mode['id'] in next_modes: - mode['is_next'] = True - return [test_utils.dict_to_namedtuple(values=mode) for mode in modes] - - def setUp(self): - super(DracManagementInternalMethodsTestCase, self).setUp() - self.node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - - boot_device_ipl_pxe = { - 'id': 'BIOS.Setup.1-1#BootSeq#NIC.Embedded.1-1-1', - 'boot_mode': 'IPL', - 'current_assigned_sequence': 0, - 'pending_assigned_sequence': 0, - 'bios_boot_string': 'Embedded NIC 1 Port 1 Partition 1'} - boot_device_ipl_disk = { - 'id': 'BIOS.Setup.1-1#BootSeq#HardDisk.List.1-1', - 'boot_mode': 'IPL', - 'current_assigned_sequence': 1, - 'pending_assigned_sequence': 1, - 'bios_boot_string': 'Hard drive C: BootSeq'} - ipl_boot_device_namedtuples = [ - test_utils.dict_to_namedtuple(values=boot_device_ipl_pxe), - test_utils.dict_to_namedtuple(values=boot_device_ipl_disk)] - ipl_boot_devices = {'IPL': ipl_boot_device_namedtuples, - 'OneTime': ipl_boot_device_namedtuples} - - boot_device_uefi_pxe = { - 'id': 'UEFI:BIOS.Setup.1-1#UefiBootSeq#NIC.PxeDevice.1-1', - 'boot_mode': 'UEFI', - 'current_assigned_sequence': 0, - 'pending_assigned_sequence': 0, - 'bios_boot_string': - 'PXE Device 1: Integrated NIC 1 Port 1 Partition 1'} - uefi_boot_device_namedtuples = [ - test_utils.dict_to_namedtuple(values=boot_device_uefi_pxe)] - uefi_boot_devices = {'UEFI': uefi_boot_device_namedtuples, - 'OneTime': uefi_boot_device_namedtuples} - - self.boot_devices = {'IPL': ipl_boot_devices, - 'UEFI': uefi_boot_devices} - - def test__get_boot_device(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_boot_modes.return_value = self.boot_modes('IPL') - mock_client.list_boot_devices.return_value = self.boot_devices['IPL'] - - boot_device = drac_mgmt._get_boot_device(self.node) - - expected_boot_device = {'boot_device': 'pxe', 'persistent': True} - self.assertEqual(expected_boot_device, boot_device) - mock_client.list_boot_modes.assert_called_once_with() - mock_client.list_boot_devices.assert_called_once_with() - - def test__get_boot_device_not_persistent(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - # if a non-persistent boot mode is marked as "next", it over-rides any - # persistent boot modes - mock_client.list_boot_modes.return_value = self.boot_modes('IPL', - 'OneTime') - mock_client.list_boot_devices.return_value = self.boot_devices['IPL'] - - boot_device = drac_mgmt._get_boot_device(self.node) - - expected_boot_device = {'boot_device': 'pxe', 'persistent': False} - self.assertEqual(expected_boot_device, boot_device) - mock_client.list_boot_modes.assert_called_once_with() - mock_client.list_boot_devices.assert_called_once_with() - - def test__get_boot_device_with_no_boot_device(self, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_boot_modes.return_value = self.boot_modes('IPL') - mock_client.list_boot_devices.return_value = {} - - boot_device = drac_mgmt._get_boot_device(self.node) - - 
expected_boot_device = {'boot_device': None, 'persistent': True} - self.assertEqual(expected_boot_device, boot_device) - mock_client.list_boot_modes.assert_called_once_with() - mock_client.list_boot_devices.assert_called_once_with() - - def test__get_boot_device_with_empty_boot_mode_list(self, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_boot_modes.return_value = [] - - self.assertRaises(exception.DracOperationError, - drac_mgmt._get_boot_device, self.node) - - def test__get_next_persistent_boot_mode(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_boot_modes.return_value = self.boot_modes('IPL') - - boot_mode = drac_mgmt._get_next_persistent_boot_mode(self.node) - - mock_get_drac_client.assert_called_once_with(self.node) - mock_client.list_boot_modes.assert_called_once_with() - expected_boot_mode = 'IPL' - self.assertEqual(expected_boot_mode, boot_mode) - - def test__get_next_persistent_boot_mode_with_non_persistent_boot_mode( - self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_boot_modes.return_value = self.boot_modes('IPL', - 'OneTime') - - boot_mode = drac_mgmt._get_next_persistent_boot_mode(self.node) - - mock_get_drac_client.assert_called_once_with(self.node) - mock_client.list_boot_modes.assert_called_once_with() - expected_boot_mode = 'IPL' - self.assertEqual(expected_boot_mode, boot_mode) - - def test__get_next_persistent_boot_mode_list_boot_modes_fail( - self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - exc = dracclient_exceptions.BaseClientException('boom') - mock_client.list_boot_modes.side_effect = exc - - self.assertRaises(exception.DracOperationError, - drac_mgmt._get_next_persistent_boot_mode, self.node) - - mock_get_drac_client.assert_called_once_with(self.node) - mock_client.list_boot_modes.assert_called_once_with() - - def test__get_next_persistent_boot_mode_with_empty_boot_mode_list( - self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_boot_modes.return_value = [] - - self.assertRaises(exception.DracOperationError, - drac_mgmt._get_next_persistent_boot_mode, self.node) - - mock_get_drac_client.assert_called_once_with(self.node) - mock_client.list_boot_modes.assert_called_once_with() - - def test__is_boot_order_flexibly_programmable(self, mock_get_drac_client): - self.assertTrue(drac_mgmt._is_boot_order_flexibly_programmable( - persistent=True, bios_settings={'SetBootOrderFqdd1': ()})) - - def test__is_boot_order_flexibly_programmable_not_persistent( - self, mock_get_drac_client): - self.assertFalse(drac_mgmt._is_boot_order_flexibly_programmable( - persistent=False, bios_settings={'SetBootOrderFqdd1': ()})) - - def test__is_boot_order_flexibly_programmable_with_no_bios_setting( - self, mock_get_drac_client): - self.assertFalse(drac_mgmt._is_boot_order_flexibly_programmable( - persistent=True, bios_settings={})) - - def test__flexibly_program_boot_order_for_disk_and_bios( - self, mock_get_drac_client): - settings = drac_mgmt._flexibly_program_boot_order( - ironic.common.boot_devices.DISK, drac_boot_mode='Bios') - - expected_settings = {'SetBootOrderFqdd1': 'HardDisk.List.1-1'} - self.assertEqual(expected_settings, settings) - - def test__flexibly_program_boot_order_for_disk_and_uefi( - self, mock_get_drac_client): 
- settings = drac_mgmt._flexibly_program_boot_order( - ironic.common.boot_devices.DISK, drac_boot_mode='Uefi') - - expected_settings = { - 'SetBootOrderFqdd1': '*.*.*', - 'SetBootOrderFqdd2': 'NIC.*.*', - 'SetBootOrderFqdd3': 'Optical.*.*', - 'SetBootOrderFqdd4': 'Floppy.*.*', - } - self.assertEqual(expected_settings, settings) - - def test__flexibly_program_boot_order_for_pxe(self, mock_get_drac_client): - settings = drac_mgmt._flexibly_program_boot_order( - ironic.common.boot_devices.PXE, drac_boot_mode='Uefi') - - expected_settings = {'SetBootOrderFqdd1': 'NIC.*.*'} - self.assertEqual(expected_settings, settings) - - def test__flexibly_program_boot_order_for_cdrom(self, - mock_get_drac_client): - settings = drac_mgmt._flexibly_program_boot_order( - ironic.common.boot_devices.CDROM, drac_boot_mode='Uefi') - - expected_settings = {'SetBootOrderFqdd1': 'Optical.*.*'} - self.assertEqual(expected_settings, settings) - - @mock.patch.object(drac_mgmt, '_get_next_persistent_boot_mode', - spec_set=True, autospec=True) - @mock.patch.object(drac_mgmt, '_get_boot_device', spec_set=True, - autospec=True) - @mock.patch.object(drac_job, 'list_unfinished_jobs', spec_set=True, - autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - def test_set_boot_device(self, mock_validate_job_queue, - mock_list_unfinished_jobs, - mock__get_boot_device, - mock__get_next_persistent_boot_mode, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_boot_devices.return_value = self.boot_devices['IPL'] - mock_list_unfinished_jobs.return_value = [] - - mock_job = mock.Mock() - mock_job.status = "Scheduled" - mock_client.get_job.return_value = mock_job - - boot_device = {'boot_device': ironic.common.boot_devices.DISK, - 'persistent': True} - mock__get_boot_device.return_value = boot_device - mock__get_next_persistent_boot_mode.return_value = 'IPL' - self.node.driver_internal_info['clean_steps'] = [] - - boot_device = drac_mgmt.set_boot_device( - self.node, ironic.common.boot_devices.PXE, persistent=False) - - self.assertEqual(0, mock_list_unfinished_jobs.call_count) - self.assertEqual(0, mock_client.delete_jobs.call_count) - mock_validate_job_queue.assert_called_once_with( - self.node, name_prefix="Configure: BIOS") - mock_client.change_boot_device_order.assert_called_once_with( - 'OneTime', 'BIOS.Setup.1-1#BootSeq#NIC.Embedded.1-1-1') - self.assertEqual(0, mock_client.set_bios_settings.call_count) - mock_client.commit_pending_bios_changes.assert_called_once_with() - - @mock.patch.object(drac_mgmt, '_get_next_persistent_boot_mode', - spec_set=True, autospec=True) - @mock.patch.object(drac_mgmt, '_get_boot_device', spec_set=True, - autospec=True) - @mock.patch.object(drac_job, 'list_unfinished_jobs', spec_set=True, - autospec=True) - def test_set_boot_device_called_with_no_change( - self, mock_list_unfinished_jobs, mock__get_boot_device, - mock__get_next_persistent_boot_mode, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_boot_devices.return_value = self.boot_devices['IPL'] - boot_device = {'boot_device': ironic.common.boot_devices.PXE, - 'persistent': True} - mock__get_boot_device.return_value = boot_device - mock__get_next_persistent_boot_mode.return_value = 'IPL' - mock_list_unfinished_jobs.return_value = [] - - boot_device = drac_mgmt.set_boot_device( - self.node, ironic.common.boot_devices.PXE, persistent=True) - - 
mock_list_unfinished_jobs.assert_called_once_with(self.node) - self.assertEqual(0, mock_client.change_boot_device_order.call_count) - self.assertEqual(0, mock_client.set_bios_settings.call_count) - self.assertEqual(0, mock_client.commit_pending_bios_changes.call_count) - - @mock.patch.object(drac_mgmt, '_flexibly_program_boot_order', - spec_set=True, autospec=True) - @mock.patch.object(drac_mgmt, '_is_boot_order_flexibly_programmable', - spec_set=True, autospec=True) - @mock.patch.object(drac_mgmt, '_get_next_persistent_boot_mode', - spec_set=True, autospec=True) - @mock.patch.object(drac_mgmt, '_get_boot_device', spec_set=True, - autospec=True) - @mock.patch.object(drac_job, 'list_unfinished_jobs', spec_set=True, - autospec=True) - def test_set_boot_device_called_with_no_drac_boot_device( - self, mock_list_unfinished_jobs, - mock__get_boot_device, mock__get_next_persistent_boot_mode, - mock__is_boot_order_flexibly_programmable, - mock__flexibly_program_boot_order, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_boot_devices.return_value = self.boot_devices['UEFI'] - mock_list_unfinished_jobs.return_value = [] - - mock_job = mock.Mock() - mock_job.status = "Scheduled" - mock_client.get_job.return_value = mock_job - boot_device = {'boot_device': ironic.common.boot_devices.PXE, - 'persistent': False} - mock__get_boot_device.return_value = boot_device - mock__get_next_persistent_boot_mode.return_value = 'UEFI' - settings = [ - { - 'name': 'BootMode', - 'instance_id': 'BIOS.Setup.1-1:BootMode', - 'current_value': 'Uefi', - 'pending_value': None, - 'read_only': False, - 'possible_values': ['Bios', 'Uefi'] - }, - ] - bios_settings = { - s['name']: test_utils.dict_to_namedtuple( - values=s) for s in settings} - mock_client.list_bios_settings.return_value = bios_settings - mock__is_boot_order_flexibly_programmable.return_value = True - flexibly_program_settings = { - 'SetBootOrderFqdd1': '*.*.*', - 'SetBootOrderFqdd2': 'NIC.*.*', - 'SetBootOrderFqdd3': 'Optical.*.*', - 'SetBootOrderFqdd4': 'Floppy.*.*', - } - mock__flexibly_program_boot_order.return_value = \ - flexibly_program_settings - - drac_mgmt.set_boot_device(self.node, ironic.common.boot_devices.DISK, - persistent=True) - - mock_list_unfinished_jobs.assert_called_once_with(self.node) - self.assertEqual(0, mock_client.change_boot_device_order.call_count) - mock_client.set_bios_settings.assert_called_once_with( - flexibly_program_settings) - mock_client.commit_pending_bios_changes.assert_called_once_with() - - @mock.patch.object(drac_mgmt, '_is_boot_order_flexibly_programmable', - spec_set=True, autospec=True) - @mock.patch.object(drac_mgmt, '_get_next_persistent_boot_mode', - spec_set=True, autospec=True) - @mock.patch.object(drac_mgmt, '_get_boot_device', spec_set=True, - autospec=True) - @mock.patch.object(drac_job, 'list_unfinished_jobs', spec_set=True, - autospec=True) - def test_set_boot_device_called_with_not_flexibly_programmable( - self, mock_list_unfinished_jobs, - mock__get_boot_device, mock__get_next_persistent_boot_mode, - mock__is_boot_order_flexibly_programmable, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_list_unfinished_jobs.return_value = [] - mock_client.list_boot_devices.return_value = self.boot_devices['UEFI'] - boot_device = {'boot_device': ironic.common.boot_devices.PXE, - 'persistent': False} - mock__get_boot_device.return_value = boot_device - 
mock__get_next_persistent_boot_mode.return_value = 'UEFI' - mock__is_boot_order_flexibly_programmable.return_value = False - - self.assertRaises(exception.InvalidParameterValue, - drac_mgmt.set_boot_device, self.node, - ironic.common.boot_devices.CDROM, persistent=False) - - mock_list_unfinished_jobs.assert_called_once_with(self.node) - self.assertEqual(0, mock_client.change_boot_device_order.call_count) - self.assertEqual(0, mock_client.set_bios_settings.call_count) - self.assertEqual(0, mock_client.commit_pending_bios_changes.call_count) - - @mock.patch.object(drac_mgmt, '_is_boot_order_flexibly_programmable', - spec_set=True, autospec=True) - @mock.patch.object(drac_mgmt, '_get_next_persistent_boot_mode', - spec_set=True, autospec=True) - @mock.patch.object(drac_mgmt, '_get_boot_device', spec_set=True, - autospec=True) - @mock.patch.object(drac_job, 'list_unfinished_jobs', spec_set=True, - autospec=True) - def test_set_boot_device_called_with_unknown_boot_mode( - self, mock_list_unfinished_jobs, mock__get_boot_device, - mock__get_next_persistent_boot_mode, - mock__is_boot_order_flexibly_programmable, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - mock_client.list_boot_devices.return_value = self.boot_devices['UEFI'] - boot_device = {'boot_device': ironic.common.boot_devices.PXE, - 'persistent': False} - mock__get_boot_device.return_value = boot_device - mock__get_next_persistent_boot_mode.return_value = 'UEFI' - settings = [ - { - 'name': 'BootMode', - 'instance_id': 'BIOS.Setup.1-1:BootMode', - 'current_value': 'Bad', - 'pending_value': None, - 'read_only': False, - 'possible_values': ['Bios', 'Uefi', 'Bad'] - }, - ] - bios_settings = { - s['name']: test_utils.dict_to_namedtuple( - values=s) for s in settings} - mock_client.list_bios_settings.return_value = bios_settings - mock__is_boot_order_flexibly_programmable.return_value = True - mock_list_unfinished_jobs.return_value = [] - self.assertRaises(exception.DracOperationError, - drac_mgmt.set_boot_device, self.node, - ironic.common.boot_devices.DISK, persistent=True) - mock_list_unfinished_jobs.assert_called_once_with(self.node) - self.assertEqual(0, mock_client.change_boot_device_order.call_count) - self.assertEqual(0, mock_client.set_bios_settings.call_count) - self.assertEqual(0, mock_client.commit_pending_bios_changes.call_count) - - @mock.patch('time.time', autospec=True) - @mock.patch('time.sleep', autospec=True) - @mock.patch.object(drac_mgmt, '_get_next_persistent_boot_mode', - spec_set=True, autospec=True) - @mock.patch.object(drac_mgmt, '_get_boot_device', spec_set=True, - autospec=True) - @mock.patch.object(drac_job, 'list_unfinished_jobs', spec_set=True, - autospec=True) - def test_set_boot_device_job_not_scheduled( - self, - mock_list_unfinished_jobs, - mock__get_boot_device, - mock__get_next_persistent_boot_mode, - mock_sleep, - mock_time, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_list_unfinished_jobs.return_value = [] - mock_client.list_boot_devices.return_value = self.boot_devices['IPL'] - mock_job = mock.Mock() - mock_job.status = "New" - mock_client.get_job.return_value = mock_job - mock_time.side_effect = [10, 50] - - boot_device = {'boot_device': ironic.common.boot_devices.DISK, - 'persistent': True} - mock__get_boot_device.return_value = boot_device - mock__get_next_persistent_boot_mode.return_value = 'IPL' - - self.assertRaises(exception.DracOperationError, - drac_mgmt.set_boot_device, 
self.node, - ironic.common.boot_devices.PXE, - persistent=True) - mock_list_unfinished_jobs.assert_called_once_with(self.node) - - @mock.patch.object(drac_job, 'list_unfinished_jobs', spec_set=True, - autospec=True) - def test_set_boot_device_with_list_unfinished_jobs_fail( - self, mock_list_unfinished_jobs, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - mock_list_unfinished_jobs.side_effect = exception.DracOperationError( - 'boom') - - self.assertRaises(exception.DracOperationError, - drac_mgmt.set_boot_device, self.node, - ironic.common.boot_devices.PXE, persistent=True) - - self.assertEqual(0, mock_client.change_boot_device_order.call_count) - self.assertEqual(0, mock_client.set_bios_settings.call_count) - self.assertEqual(0, mock_client.commit_pending_bios_changes.call_count) - - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_job, 'list_unfinished_jobs', spec_set=True, - autospec=True) - @mock.patch.object(drac_mgmt, '_get_boot_device', spec_set=True, - autospec=True) - @mock.patch.object(drac_mgmt, '_get_next_persistent_boot_mode', - spec_set=True, autospec=True) - def test_set_boot_device_with_list_unfinished_jobs_without_clean_step( - self, mock__get_next_persistent_boot_mode, mock__get_boot_device, - mock_list_unfinished_jobs, mock_validate_job_queue, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - bios_job_dict = { - 'id': 'JID_602553293345', - 'name': 'ConfigBIOS:BIOS.Setup.1-1', - 'start_time': 'TIME_NOW', - 'until_time': 'TIME_NA', - 'message': 'Task successfully scheduled.', - 'status': 'Scheduled', - 'percent_complete': 0} - bios_job = test_utils.make_job(bios_job_dict) - - mock_list_unfinished_jobs.return_value = [bios_job] - mock_client.list_boot_devices.return_value = self.boot_devices['IPL'] - boot_device = {'boot_device': ironic.common.boot_devices.DISK, - 'persistent': True} - - mock__get_boot_device.return_value = boot_device - mock__get_next_persistent_boot_mode.return_value = 'IPL' - - self.node.driver_internal_info['clean_steps'] = [] - - drac_mgmt.set_boot_device(self.node, ironic.common.boot_devices.DISK, - persistent=True) - self.assertEqual(0, mock_list_unfinished_jobs.call_count) - self.assertEqual(0, mock_client.delete_jobs.call_count) - - mock_validate_job_queue.assert_called_once_with( - self.node, name_prefix="Configure: BIOS") - - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_job, 'list_unfinished_jobs', spec_set=True, - autospec=True) - @mock.patch.object(drac_mgmt, '_get_boot_device', spec_set=True, - autospec=True) - @mock.patch.object(drac_mgmt, '_get_next_persistent_boot_mode', - spec_set=True, autospec=True) - def test_set_boot_device_with_multiple_unfinished_jobs_without_clean_step( - self, mock__get_next_persistent_boot_mode, mock__get_boot_device, - mock_list_unfinished_jobs, mock_validate_job_queue, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - job_dict = { - 'id': 'JID_602553293345', - 'name': 'Config:RAID:RAID.Integrated.1-1', - 'start_time': 'TIME_NOW', - 'until_time': 'TIME_NA', - 'message': 'Task successfully scheduled.', - 'status': 'Scheduled', - 'percent_complete': 0} - job = test_utils.make_job(job_dict) - - bios_job_dict = { - 'id': 'JID_602553293346', - 'name': 'ConfigBIOS:BIOS.Setup.1-1', - 'start_time': 'TIME_NOW', - 'until_time': 
'TIME_NA', - 'message': 'Task successfully scheduled.', - 'status': 'Scheduled', - 'percent_complete': 0} - bios_job = test_utils.make_job(bios_job_dict) - - mock_list_unfinished_jobs.return_value = [job, bios_job] - mock_client.list_boot_devices.return_value = self.boot_devices['IPL'] - boot_device = {'boot_device': ironic.common.boot_devices.DISK, - 'persistent': True} - - mock__get_boot_device.return_value = boot_device - mock__get_next_persistent_boot_mode.return_value = 'IPL' - - self.node.driver_internal_info['clean_steps'] = [] - drac_mgmt.set_boot_device(self.node, ironic.common.boot_devices.DISK, - persistent=True) - self.assertEqual(0, mock_list_unfinished_jobs.call_count) - self.assertEqual(0, mock_client.delete_jobs.call_count) - - mock_validate_job_queue.assert_called_once_with( - self.node, name_prefix="Configure: BIOS") - - @mock.patch.object(drac_mgmt, '_get_next_persistent_boot_mode', - spec_set=True, autospec=True) - @mock.patch.object(drac_mgmt, '_get_boot_device', spec_set=True, - autospec=True) - @mock.patch.object(drac_job, 'list_unfinished_jobs', spec_set=True, - autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - def test_set_boot_device_with_list_unfinished_jobs_with_clean_step( - self, mock_validate_job_queue, - mock_list_unfinished_jobs, - mock__get_boot_device, - mock__get_next_persistent_boot_mode, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_boot_devices.return_value = self.boot_devices['IPL'] - - boot_device = {'boot_device': ironic.common.boot_devices.DISK, - 'persistent': True} - mock__get_boot_device.return_value = boot_device - mock__get_next_persistent_boot_mode.return_value = 'IPL' - - mock_job = mock.Mock() - mock_job.status = "Scheduled" - mock_client.get_job.return_value = mock_job - - bios_job_dict = { - 'id': 'JID_602553293345', - 'name': 'ConfigBIOS:BIOS.Setup.1-1', - 'start_time': 'TIME_NOW', - 'until_time': 'TIME_NA', - 'message': 'Task successfully scheduled.', - 'status': 'Scheduled', - 'percent_complete': 0} - bios_job = test_utils.make_job(bios_job_dict) - mock_list_unfinished_jobs.return_value = [bios_job] - - self.node.driver_internal_info['clean_steps'] = [{ - u'interface': u'management', u'step': u'clear_job_queue'}] - boot_device = drac_mgmt.set_boot_device( - self.node, ironic.common.boot_devices.PXE, persistent=False) - mock_list_unfinished_jobs.assert_called_once_with(self.node) - mock_client.delete_jobs.assert_called_once_with( - job_ids=['JID_602553293345']) - - self.assertEqual(0, mock_validate_job_queue.call_count) - - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_job, 'list_unfinished_jobs', spec_set=True, - autospec=True) - @mock.patch.object(drac_mgmt, '_get_boot_device', spec_set=True, - autospec=True) - @mock.patch.object(drac_mgmt, '_get_next_persistent_boot_mode', - spec_set=True, autospec=True) - def test_set_boot_device_with_multiple_unfinished_jobs_with_clean_step( - self, mock__get_next_persistent_boot_mode, mock__get_boot_device, - mock_list_unfinished_jobs, mock_validate_job_queue, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - job_dict = { - 'id': 'JID_602553293345', - 'name': 'Config:RAID:RAID.Integrated.1-1', - 'start_time': 'TIME_NOW', - 'until_time': 'TIME_NA', - 'message': 'Task successfully scheduled.', - 'status': 'Scheduled', - 'percent_complete': 0} - job = 
test_utils.make_job(job_dict) - - bios_job_dict = { - 'id': 'JID_602553293346', - 'name': 'ConfigBIOS:BIOS.Setup.1-1', - 'start_time': 'TIME_NOW', - 'until_time': 'TIME_NA', - 'message': 'Task successfully scheduled.', - 'status': 'Scheduled', - 'percent_complete': 0} - bios_job = test_utils.make_job(bios_job_dict) - - mock_list_unfinished_jobs.return_value = [job, bios_job] - mock_client.list_boot_devices.return_value = self.boot_devices['IPL'] - boot_device = {'boot_device': ironic.common.boot_devices.DISK, - 'persistent': True} - - mock__get_boot_device.return_value = boot_device - mock__get_next_persistent_boot_mode.return_value = 'IPL' - - self.node.driver_internal_info['clean_steps'] = [{ - u'interface': u'management', u'step': u'clear_job_queue'}] - - drac_mgmt.set_boot_device(self.node, ironic.common.boot_devices.DISK, - persistent=True) - mock_list_unfinished_jobs.assert_called_once_with(self.node) - mock_client.delete_jobs.assert_called_once_with( - job_ids=['JID_602553293345', 'JID_602553293346']) - - self.assertEqual(0, mock_validate_job_queue.call_count) - - -@mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) -class DracManagementTestCase(test_utils.BaseDracTest): - - def setUp(self): - super(DracManagementTestCase, self).setUp() - self.node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - - def test_get_properties(self, mock_get_drac_client): - expected = drac_common.COMMON_PROPERTIES - driver = drac_mgmt.DracManagement() - self.assertEqual(expected, driver.get_properties()) - - def test_get_supported_boot_devices(self, mock_get_drac_client): - expected_boot_devices = [ironic.common.boot_devices.PXE, - ironic.common.boot_devices.DISK, - ironic.common.boot_devices.CDROM] - - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - boot_devices = ( - task.driver.management.get_supported_boot_devices(task)) - - self.assertEqual(sorted(expected_boot_devices), sorted(boot_devices)) - - @mock.patch.object(drac_mgmt, '_get_boot_device', spec_set=True, - autospec=True) - def test_get_boot_device(self, mock__get_boot_device, - mock_get_drac_client): - expected_boot_device = {'boot_device': ironic.common.boot_devices.DISK, - 'persistent': True} - mock__get_boot_device.return_value = expected_boot_device - - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - boot_device = task.driver.management.get_boot_device(task) - - self.assertEqual(expected_boot_device, boot_device) - mock__get_boot_device.assert_called_once_with(task.node) - - @mock.patch.object(drac_mgmt, '_get_boot_device', spec_set=True, - autospec=True) - def test_get_boot_device_from_driver_internal_info(self, - mock__get_boot_device, - mock_get_drac_client): - expected_boot_device = {'boot_device': ironic.common.boot_devices.DISK, - 'persistent': True} - - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - task.node.driver_internal_info['drac_boot_device'] = ( - expected_boot_device) - boot_device = task.driver.management.get_boot_device(task) - - self.assertEqual(expected_boot_device, boot_device) - self.assertEqual(0, mock__get_boot_device.call_count) - - def test_set_boot_device(self, mock_get_drac_client): - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - task.driver.management.set_boot_device( - task, ironic.common.boot_devices.DISK, persistent=True) - - expected_boot_device = { - 'boot_device': ironic.common.boot_devices.DISK, - 
'persistent': True} - - self.node.refresh() - self.assertEqual( - self.node.driver_internal_info['drac_boot_device'], - expected_boot_device) - - def test_set_boot_device_fail(self, mock_get_drac_client): - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - self.assertRaises(exception.InvalidParameterValue, - task.driver.management.set_boot_device, task, - 'foo') - - def test_get_sensors_data(self, mock_get_drac_client): - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - self.assertRaises(NotImplementedError, - task.driver.management.get_sensors_data, task) - - def test_reset_idrac(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - return_value = task.driver.management.reset_idrac(task) - mock_client.reset_idrac.assert_called_once_with( - force=True, wait=True) - - self.assertIsNone(return_value) - - def test_known_good_state(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - return_value = task.driver.management.known_good_state(task) - mock_client.reset_idrac.assert_called_once_with( - force=True, wait=True) - mock_client.delete_jobs.assert_called_once_with( - job_ids=['JID_CLEARALL']) - - self.assertIsNone(return_value) - - def test_clear_job_queue(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - return_value = task.driver.management.clear_job_queue(task) - mock_client.delete_jobs.assert_called_once_with( - job_ids=['JID_CLEARALL']) - - self.assertIsNone(return_value) - - class DracRedfishManagementTestCase(test_utils.BaseDracTest): def setUp(self): @@ -1453,10 +660,7 @@ class DracRedfishManagementTestCase(test_utils.BaseDracTest): 'iDRAC on node %(node)s does not support ' 'clearing Lifecycle Controller job queue ' 'using the idrac-redfish driver. ' - 'If using iDRAC9, consider upgrading firmware. ' - 'If using iDRAC8, consider switching to ' - 'idrac-wsman for management interface if ' - 'possible.', + 'If using iDRAC9, consider upgrading firmware.', {'node': task.node.uuid}) @mock.patch.object(drac_mgmt, 'LOG', autospec=True) @@ -1511,10 +715,7 @@ class DracRedfishManagementTestCase(test_utils.BaseDracTest): mock_log.warning.assert_called_once_with( 'iDRAC on node %(node)s does not support ' 'iDRAC reset using the idrac-redfish driver. ' - 'If using iDRAC9, consider upgrading firmware. ' - 'If using iDRAC8, consider switching to ' - 'idrac-wsman for management interface if ' - 'possible.', + 'If using iDRAC9, consider upgrading firmware. ', {'node': task.node.uuid}) @mock.patch.object(redfish_utils, 'wait_until_get_system_ready', diff --git a/ironic/tests/unit/drivers/modules/drac/test_periodic_task.py b/ironic/tests/unit/drivers/modules/drac/test_periodic_task.py deleted file mode 100644 index 6ba590fcd7..0000000000 --- a/ironic/tests/unit/drivers/modules/drac/test_periodic_task.py +++ /dev/null @@ -1,458 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Test class for DRAC periodic tasks -""" - -from unittest import mock - -from ironic.conductor import task_manager -from ironic.conductor import utils as manager_utils -from ironic.drivers.modules.drac import common as drac_common -from ironic.drivers.modules.drac import raid as drac_raid -from ironic.tests.unit.db import base as db_base -from ironic.tests.unit.drivers.modules.drac import utils as test_utils -from ironic.tests.unit.objects import utils as obj_utils - -INFO_DICT = test_utils.INFO_DICT - - -class DracPeriodicTaskTestCase(db_base.DbTestCase): - - def setUp(self): - super(DracPeriodicTaskTestCase, self).setUp() - self.node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - self.raid = drac_raid.DracRAID() - self.raid_wsman = drac_raid.DracWSManRAID() - self.job = { - 'id': 'JID_001436912645', - 'name': 'ConfigBIOS:BIOS.Setup.1-1', - 'start_time': '00000101000000', - 'until_time': 'TIME_NA', - 'message': 'Job in progress', - 'status': 'Running', - 'percent_complete': 34} - self.virtual_disk = { - 'id': 'Disk.Virtual.0:RAID.Integrated.1-1', - 'name': 'disk 0', - 'description': 'Virtual Disk 0 on Integrated RAID Controller 1', - 'controller': 'RAID.Integrated.1-1', - 'raid_level': '1', - 'size_mb': 571776, - 'status': 'ok', - 'raid_status': 'online', - 'span_depth': 1, - 'span_length': 2, - 'pending_operations': None - } - - def test__query_raid_config_job_status_drac(self): - self._test__query_raid_config_job_status(self.raid) - - def test__query_raid_config_job_status_drac_wsman(self): - self._test__query_raid_config_job_status(self.raid_wsman) - - @mock.patch.object(task_manager, 'acquire', autospec=True) - def _test__query_raid_config_job_status(self, raid, mock_acquire): - # mock node.driver_internal_info - driver_internal_info = {'raid_config_job_ids': ['42']} - self.node.driver_internal_info = driver_internal_info - self.node.save() - # mock manager - mock_manager = mock.Mock() - node_list = [(self.node.uuid, 'idrac', '', - {'raid_config_job_ids': ['42']})] - mock_manager.iter_nodes.return_value = node_list - # mock task_manager.acquire - task = mock.Mock(node=self.node, driver=mock.Mock(raid=raid)) - mock_acquire.return_value = mock.MagicMock( - __enter__=mock.MagicMock(return_value=task)) - # mock _check_node_raid_jobs - raid._check_node_raid_jobs = mock.Mock() - - raid._query_raid_config_job_status(mock_manager, - self.context) - - raid._check_node_raid_jobs.assert_called_once_with(task) - - def test__query_raid_config_job_status_no_config_jobs_drac(self): - self._test__query_raid_config_job_status_no_config_jobs(self.raid) - - def test__query_raid_config_job_status_no_config_jobs_drac_wsman(self): - self._test__query_raid_config_job_status_no_config_jobs( - self.raid_wsman) - - @mock.patch.object(task_manager, 'acquire', autospec=True) - def _test__query_raid_config_job_status_no_config_jobs(self, raid, - mock_acquire): - # mock manager - mock_manager = mock.Mock() - node_list = [(self.node.uuid, 'idrac', '', {})] - mock_manager.iter_nodes.return_value = node_list - # mock task_manager.acquire - task = 
mock.Mock(node=self.node, driver=mock.Mock(raid=raid)) - mock_acquire.return_value = mock.MagicMock( - __enter__=mock.MagicMock(return_value=task)) - # mock _check_node_raid_jobs - raid._check_node_raid_jobs = mock.Mock() - - raid._query_raid_config_job_status(mock_manager, None) - - self.assertEqual(0, raid._check_node_raid_jobs.call_count) - - def test__query_raid_config_job_status_no_nodes(self): - # mock manager - mock_manager = mock.Mock() - node_list = [] - mock_manager.iter_nodes.return_value = node_list - # mock _check_node_raid_jobs - self.raid._check_node_raid_jobs = mock.Mock() - - self.raid._query_raid_config_job_status(mock_manager, None) - - self.assertEqual(0, self.raid._check_node_raid_jobs.call_count) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - def test__check_node_raid_jobs_without_update(self, mock_get_drac_client): - # mock node.driver_internal_info - driver_internal_info = {'raid_config_job_ids': ['42']} - self.node.driver_internal_info = driver_internal_info - self.node.save() - # mock task - task = mock.Mock(node=self.node) - # mock dracclient.get_job - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.get_job.return_value = test_utils.dict_to_namedtuple( - values=self.job) - - self.raid._check_node_raid_jobs(task) - - mock_client.get_job.assert_called_once_with('42') - self.assertEqual(0, mock_client.list_virtual_disks.call_count) - self.node.refresh() - self.assertEqual(['42'], - self.node.driver_internal_info['raid_config_job_ids']) - self.assertEqual({}, self.node.raid_config) - self.assertIs(False, self.node.maintenance) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid.DracRAID, 'get_logical_disks', - spec_set=True, autospec=True) - def _test__check_node_raid_jobs_with_completed_job( - self, mock_notify_conductor_resume, - mock_get_logical_disks, mock_get_drac_client): - expected_logical_disk = {'size_gb': 558, - 'raid_level': '1', - 'name': 'disk 0'} - # mock node.driver_internal_info - driver_internal_info = {'raid_config_job_ids': ['42']} - self.node.driver_internal_info = driver_internal_info - self.node.save() - # mock task - task = mock.Mock(node=self.node, context=self.context) - # mock dracclient.get_job - self.job['status'] = 'Completed' - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.get_job.return_value = test_utils.dict_to_namedtuple( - values=self.job) - # mock driver.raid.get_logical_disks - mock_get_logical_disks.return_value = { - 'logical_disks': [expected_logical_disk] - } - - self.raid._check_node_raid_jobs(task) - - mock_client.get_job.assert_called_once_with('42') - self.node.refresh() - self.assertEqual([], - self.node.driver_internal_info['raid_config_job_ids']) - self.assertEqual([expected_logical_disk], - self.node.raid_config['logical_disks']) - mock_notify_conductor_resume.assert_called_once_with(task) - - @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', - autospec=True) - def test__check_node_raid_jobs_with_completed_job_in_clean( - self, mock_notify_conductor_resume): - self.node.clean_step = {'foo': 'bar'} - self.node.save() - self._test__check_node_raid_jobs_with_completed_job( - mock_notify_conductor_resume) - - @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy', - autospec=True) - def test__check_node_raid_jobs_with_completed_job_in_deploy( - self, mock_notify_conductor_resume): - 
self._test__check_node_raid_jobs_with_completed_job( - mock_notify_conductor_resume) - - @mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True) - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - def test__check_node_raid_jobs_with_failed_job( - self, mock_get_drac_client, mock_cleaning_error_handler): - # mock node.driver_internal_info and node.clean_step - driver_internal_info = {'raid_config_job_ids': ['42']} - self.node.driver_internal_info = driver_internal_info - self.node.clean_step = {'foo': 'bar'} - self.node.save() - # mock task - task = mock.Mock(node=self.node, context=self.context) - # mock dracclient.get_job - self.job['status'] = 'Failed' - self.job['message'] = 'boom' - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.get_job.return_value = test_utils.dict_to_namedtuple( - values=self.job) - # mock dracclient.list_virtual_disks - mock_client.list_virtual_disks.return_value = [ - test_utils.dict_to_namedtuple(values=self.virtual_disk)] - - self.raid._check_node_raid_jobs(task) - - mock_client.get_job.assert_called_once_with('42') - self.assertEqual(0, mock_client.list_virtual_disks.call_count) - self.node.refresh() - self.assertEqual([], - self.node.driver_internal_info['raid_config_job_ids']) - self.assertEqual({}, self.node.raid_config) - mock_cleaning_error_handler.assert_called_once_with(task, mock.ANY) - - @mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True) - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - def test__check_node_raid_jobs_with_completed_with_errors_job( - self, mock_get_drac_client, mock_cleaning_error_handler): - # mock node.driver_internal_info and node.clean_step - driver_internal_info = {'raid_config_job_ids': ['42']} - self.node.driver_internal_info = driver_internal_info - self.node.clean_step = {'foo': 'bar'} - self.node.save() - # mock task - task = mock.Mock(node=self.node, context=self.context) - # mock dracclient.get_job - self.job['status'] = 'Completed with Errors' - self.job['message'] = 'PR31: Completed with Errors' - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.get_job.return_value = test_utils.dict_to_namedtuple( - values=self.job) - # mock dracclient.list_virtual_disks - mock_client.list_virtual_disks.return_value = [ - test_utils.dict_to_namedtuple(values=self.virtual_disk)] - - self.raid._check_node_raid_jobs(task) - - mock_client.get_job.assert_called_once_with('42') - self.assertEqual(0, mock_client.list_virtual_disks.call_count) - self.node.refresh() - self.assertEqual([], - self.node.driver_internal_info['raid_config_job_ids']) - self.assertEqual({}, self.node.raid_config) - mock_cleaning_error_handler.assert_called_once_with(task, mock.ANY) - - @mock.patch.object(manager_utils, 'deploying_error_handler', autospec=True) - @mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True) - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid.DracRAID, 'get_logical_disks', - spec_set=True, autospec=True) - def _test__check_node_raid_jobs_with_completed_job_already_failed( - self, mock_notify_conductor_resume, - mock_get_logical_disks, mock_get_drac_client, - mock_cleaning_error_handler, mock_deploying_error_handler): - expected_logical_disk = {'size_gb': 558, - 'raid_level': '1', - 'name': 'disk 0'} - # mock node.driver_internal_info - driver_internal_info = 
{'raid_config_job_ids': ['42'], - 'raid_config_job_failure': True} - self.node.driver_internal_info = driver_internal_info - self.node.save() - # mock task - task = mock.Mock(node=self.node, context=self.context) - # mock dracclient.get_job - self.job['status'] = 'Completed' - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.get_job.return_value = test_utils.dict_to_namedtuple( - values=self.job) - # mock driver.raid.get_logical_disks - mock_get_logical_disks.return_value = { - 'logical_disks': [expected_logical_disk] - } - - self.raid._check_node_raid_jobs(task) - - mock_client.get_job.assert_called_once_with('42') - self.node.refresh() - self.assertEqual([], - self.node.driver_internal_info['raid_config_job_ids']) - self.assertNotIn('raid_config_job_failure', - self.node.driver_internal_info) - self.assertNotIn('logical_disks', self.node.raid_config) - if self.node.clean_step: - mock_cleaning_error_handler.assert_called_once_with(task, mock.ANY) - else: - mock_deploying_error_handler.assert_called_once_with(task, - mock.ANY, - mock.ANY) - self.assertFalse(mock_notify_conductor_resume.called) - - @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', - autospec=True) - def test__check_node_raid_jobs_with_completed_job_already_failed_in_clean( - self, mock_notify_conductor_resume): - self.node.clean_step = {'foo': 'bar'} - self.node.save() - self._test__check_node_raid_jobs_with_completed_job_already_failed( - mock_notify_conductor_resume) - - @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy', - autospec=True) - def test__check_node_raid_jobs_with_completed_job_already_failed_in_deploy( - self, mock_notify_conductor_resume): - self._test__check_node_raid_jobs_with_completed_job_already_failed( - mock_notify_conductor_resume) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid.DracRAID, 'get_logical_disks', - spec_set=True, autospec=True) - def _test__check_node_raid_jobs_with_multiple_jobs_completed( - self, mock_notify_conductor_resume, - mock_get_logical_disks, mock_get_drac_client): - expected_logical_disk = {'size_gb': 558, - 'raid_level': '1', - 'name': 'disk 0'} - # mock node.driver_internal_info - driver_internal_info = {'raid_config_job_ids': ['42', '36']} - self.node.driver_internal_info = driver_internal_info - self.node.save() - # mock task - task = mock.Mock(node=self.node, context=self.context) - # mock dracclient.get_job - self.job['status'] = 'Completed' - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.get_job.return_value = test_utils.dict_to_namedtuple( - values=self.job) - # mock driver.raid.get_logical_disks - mock_get_logical_disks.return_value = { - 'logical_disks': [expected_logical_disk] - } - - self.raid._check_node_raid_jobs(task) - - mock_client.get_job.assert_has_calls([mock.call('42'), - mock.call('36')]) - self.node.refresh() - self.assertEqual([], - self.node.driver_internal_info['raid_config_job_ids']) - self.assertNotIn('raid_config_job_failure', - self.node.driver_internal_info) - self.assertEqual([expected_logical_disk], - self.node.raid_config['logical_disks']) - mock_notify_conductor_resume.assert_called_once_with(task) - - @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', - autospec=True) - def test__check_node_raid_jobs_with_multiple_jobs_completed_in_clean( - self, mock_notify_conductor_resume): - self.node.clean_step = {'foo': 'bar'} - self.node.save() - 
self._test__check_node_raid_jobs_with_multiple_jobs_completed( - mock_notify_conductor_resume) - - @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy', - autospec=True) - def test__check_node_raid_jobs_with_multiple_jobs_completed_in_deploy( - self, mock_notify_conductor_resume): - self._test__check_node_raid_jobs_with_multiple_jobs_completed( - mock_notify_conductor_resume) - - @mock.patch.object(manager_utils, 'deploying_error_handler', autospec=True) - @mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True) - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid.DracRAID, 'get_logical_disks', - spec_set=True, autospec=True) - def _test__check_node_raid_jobs_with_multiple_jobs_failed( - self, mock_notify_conductor_resume, - mock_get_logical_disks, mock_get_drac_client, - mock_cleaning_error_handler, mock_deploying_error_handler): - expected_logical_disk = {'size_gb': 558, - 'raid_level': '1', - 'name': 'disk 0'} - # mock node.driver_internal_info - driver_internal_info = {'raid_config_job_ids': ['42', '36']} - self.node.driver_internal_info = driver_internal_info - self.node.save() - # mock task - task = mock.Mock(node=self.node, context=self.context) - # mock dracclient.get_job - self.job['status'] = 'Completed' - failed_job = self.job.copy() - failed_job['status'] = 'Failed' - failed_job['message'] = 'boom' - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.get_job.side_effect = [ - test_utils.dict_to_namedtuple(values=failed_job), - test_utils.dict_to_namedtuple(values=self.job)] - # mock driver.raid.get_logical_disks - mock_get_logical_disks.return_value = { - 'logical_disks': [expected_logical_disk] - } - - self.raid._check_node_raid_jobs(task) - - mock_client.get_job.assert_has_calls([mock.call('42'), - mock.call('36')]) - self.node.refresh() - self.assertEqual([], - self.node.driver_internal_info['raid_config_job_ids']) - self.assertNotIn('raid_config_job_failure', - self.node.driver_internal_info) - self.assertNotIn('logical_disks', self.node.raid_config) - if self.node.clean_step: - mock_cleaning_error_handler.assert_called_once_with(task, mock.ANY) - else: - mock_deploying_error_handler.assert_called_once_with(task, - mock.ANY, - mock.ANY) - self.assertFalse(mock_notify_conductor_resume.called) - - @mock.patch.object(manager_utils, 'notify_conductor_resume_clean', - autospec=True) - def test__check_node_raid_jobs_with_multiple_jobs_failed_in_clean( - self, mock_notify_conductor_resume): - self.node.clean_step = {'foo': 'bar'} - self.node.save() - self._test__check_node_raid_jobs_with_multiple_jobs_failed( - mock_notify_conductor_resume) - - @mock.patch.object(manager_utils, 'notify_conductor_resume_deploy', - autospec=True) - def test__check_node_raid_jobs_with_multiple_jobs_failed_in_deploy( - self, mock_notify_conductor_resume): - self._test__check_node_raid_jobs_with_multiple_jobs_failed( - mock_notify_conductor_resume) diff --git a/ironic/tests/unit/drivers/modules/drac/test_power.py b/ironic/tests/unit/drivers/modules/drac/test_power.py deleted file mode 100644 index 47819589cd..0000000000 --- a/ironic/tests/unit/drivers/modules/drac/test_power.py +++ /dev/null @@ -1,212 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Test class for DRAC power interface -""" - -from unittest import mock - -from dracclient import constants as drac_constants -from dracclient import exceptions as drac_exceptions -from oslo_service import loopingcall - -from ironic.common import exception -from ironic.common import states -from ironic.conductor import task_manager -from ironic.drivers.modules.drac import common as drac_common -from ironic.drivers.modules.drac import power as drac_power -from ironic.tests.unit.drivers.modules.drac import utils as test_utils -from ironic.tests.unit.objects import utils as obj_utils - -INFO_DICT = test_utils.INFO_DICT - - -@mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) -class DracPowerTestCase(test_utils.BaseDracTest): - - def setUp(self): - super(DracPowerTestCase, self).setUp() - self.node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - - def test_get_properties(self, mock_get_drac_client): - expected = drac_common.COMMON_PROPERTIES - driver = drac_power.DracPower() - self.assertEqual(expected, driver.get_properties()) - - def test_get_power_state(self, mock_get_drac_client): - mock_client = mock_get_drac_client.return_value - mock_client.get_power_state.return_value = drac_constants.POWER_ON - - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - power_state = task.driver.power.get_power_state(task) - - self.assertEqual(states.POWER_ON, power_state) - mock_client.get_power_state.assert_called_once_with() - - def test_get_power_state_fail(self, mock_get_drac_client): - mock_client = mock_get_drac_client.return_value - exc = drac_exceptions.BaseClientException('boom') - mock_client.get_power_state.side_effect = exc - - with task_manager.acquire(self.context, self.node.uuid, - shared=True) as task: - self.assertRaises(exception.DracOperationError, - task.driver.power.get_power_state, task) - - mock_client.get_power_state.assert_called_once_with() - - @mock.patch.object(loopingcall.BackOffLoopingCall, '_sleep', autospec=True) - @mock.patch.object(drac_power.LOG, 'warning', autospec=True) - def test_set_power_state(self, mock_log, mock_sleep, mock_get_drac_client): - mock_client = mock_get_drac_client.return_value - mock_client.get_power_state.side_effect = [drac_constants.POWER_ON, - drac_constants.POWER_OFF] - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - task.driver.power.set_power_state(task, states.POWER_OFF) - - drac_power_state = drac_power.REVERSE_POWER_STATES[states.POWER_OFF] - mock_client.set_power_state.assert_called_once_with(drac_power_state) - self.assertFalse(mock_log.called) - - def test_set_power_state_fail(self, mock_get_drac_client): - mock_client = mock_get_drac_client.return_value - exc = drac_exceptions.BaseClientException('boom') - mock_client.set_power_state.side_effect = exc - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - self.assertRaises(exception.DracOperationError, - task.driver.power.set_power_state, task, - states.POWER_OFF) - - drac_power_state = 
drac_power.REVERSE_POWER_STATES[states.POWER_OFF] - mock_client.set_power_state.assert_called_once_with(drac_power_state) - - @mock.patch.object(loopingcall.BackOffLoopingCall, '_sleep', autospec=True) - @mock.patch.object(drac_power.LOG, 'warning', autospec=True) - def test_set_power_state_timeout(self, mock_log, mock_sleep, - mock_get_drac_client): - mock_client = mock_get_drac_client.return_value - mock_client.get_power_state.side_effect = [drac_constants.POWER_ON, - drac_constants.POWER_OFF] - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - task.driver.power.set_power_state(task, states.POWER_OFF, - timeout=11) - - drac_power_state = drac_power.REVERSE_POWER_STATES[states.POWER_OFF] - mock_client.set_power_state.assert_called_once_with(drac_power_state) - self.assertFalse(mock_log.called) - - @mock.patch.object(drac_power.LOG, 'warning', autospec=True) - def test_reboot_while_powered_on(self, mock_log, mock_get_drac_client): - mock_client = mock_get_drac_client.return_value - mock_client.get_power_state.return_value = drac_constants.POWER_ON - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - task.driver.power.reboot(task) - - drac_power_state = drac_power.REVERSE_POWER_STATES[states.REBOOT] - mock_client.set_power_state.assert_called_once_with(drac_power_state) - self.assertFalse(mock_log.called) - - @mock.patch.object(drac_power.LOG, 'warning', autospec=True) - def test_reboot_while_powered_on_timeout(self, mock_log, - mock_get_drac_client): - mock_client = mock_get_drac_client.return_value - mock_client.get_power_state.return_value = drac_constants.POWER_ON - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - task.driver.power.reboot(task, timeout=42) - - drac_power_state = drac_power.REVERSE_POWER_STATES[states.REBOOT] - mock_client.set_power_state.assert_called_once_with(drac_power_state) - self.assertTrue(mock_log.called) - - def test_reboot_while_powered_off(self, mock_get_drac_client): - mock_client = mock_get_drac_client.return_value - mock_client.get_power_state.side_effect = [drac_constants.POWER_OFF, - drac_constants.POWER_ON] - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - task.driver.power.reboot(task) - - drac_power_state = drac_power.REVERSE_POWER_STATES[states.POWER_ON] - mock_client.set_power_state.assert_called_once_with(drac_power_state) - - @mock.patch('time.sleep', autospec=True) - def test_reboot_retries_success(self, mock_sleep, mock_get_drac_client): - mock_client = mock_get_drac_client.return_value - mock_client.get_power_state.side_effect = [drac_constants.POWER_OFF, - drac_constants.POWER_OFF, - drac_constants.POWER_ON] - exc = drac_exceptions.DRACOperationFailed( - drac_messages=['The command failed to set RequestedState']) - mock_client.set_power_state.side_effect = [exc, None] - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - task.driver.power.reboot(task) - - drac_power_state = drac_power.REVERSE_POWER_STATES[states.POWER_ON] - self.assertEqual(2, mock_client.set_power_state.call_count) - mock_client.set_power_state.assert_has_calls( - [mock.call(drac_power_state), - mock.call(drac_power_state)]) - - @mock.patch('time.sleep', autospec=True) - def test_reboot_retries_fail(self, mock_sleep, mock_get_drac_client): - mock_client = mock_get_drac_client.return_value - mock_client.get_power_state.return_value = drac_constants.POWER_OFF - exc = drac_exceptions.DRACOperationFailed( - 
drac_messages=['The command failed to set RequestedState']) - mock_client.set_power_state.side_effect = exc - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - self.assertRaises(exception.DracOperationError, - task.driver.power.reboot, task) - - self.assertEqual(drac_power.POWER_STATE_TRIES, - mock_client.set_power_state.call_count) - - @mock.patch('time.sleep', autospec=True) - def test_reboot_retries_power_change_success(self, mock_sleep, - mock_get_drac_client): - mock_client = mock_get_drac_client.return_value - mock_client.get_power_state.side_effect = [drac_constants.POWER_OFF, - drac_constants.POWER_ON] - exc = drac_exceptions.DRACOperationFailed( - drac_messages=['The command failed to set RequestedState']) - mock_client.set_power_state.side_effect = [exc, None] - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - task.driver.power.reboot(task) - - self.assertEqual(2, mock_client.set_power_state.call_count) - drac_power_state1 = drac_power.REVERSE_POWER_STATES[states.POWER_ON] - drac_power_state2 = drac_power.REVERSE_POWER_STATES[states.REBOOT] - mock_client.set_power_state.assert_has_calls( - [mock.call(drac_power_state1), - mock.call(drac_power_state2)]) diff --git a/ironic/tests/unit/drivers/modules/drac/test_raid.py b/ironic/tests/unit/drivers/modules/drac/test_raid.py index 091906704b..fc588373c9 100644 --- a/ironic/tests/unit/drivers/modules/drac/test_raid.py +++ b/ironic/tests/unit/drivers/modules/drac/test_raid.py @@ -15,11 +15,8 @@ Test class for DRAC RAID interface """ -from collections import defaultdict from unittest import mock -from dracclient import constants -from dracclient import exceptions as drac_exceptions from oslo_utils import importutils import sushy import tenacity @@ -29,10 +26,7 @@ from ironic.common import states from ironic.conductor import task_manager from ironic.conductor import utils as manager_utils from ironic.conf import CONF -from ironic.drivers import base from ironic.drivers.modules import deploy_utils -from ironic.drivers.modules.drac import common as drac_common -from ironic.drivers.modules.drac import job as drac_job from ironic.drivers.modules.drac import raid as drac_raid from ironic.drivers.modules.drac import utils as drac_utils from ironic.drivers.modules.redfish import raid as redfish_raid @@ -45,448 +39,6 @@ sushy_oem_idrac = importutils.try_import('sushy_oem_idrac') INFO_DICT = test_utils.INFO_DICT -@mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) -class DracQueryRaidConfigurationTestCase(test_utils.BaseDracTest): - - def setUp(self): - super(DracQueryRaidConfigurationTestCase, self).setUp() - self.node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - - raid_controller_dict = { - 'id': 'RAID.Integrated.1-1', - 'description': 'Integrated RAID Controller 1', - 'manufacturer': 'DELL', - 'model': 'PERC H710 Mini', - 'primary_status': 'ok', - 'firmware_version': '21.3.0-0009', - 'bus': '1', - 'supports_realtime': True} - self.raid_controller = test_utils.make_raid_controller( - raid_controller_dict) - - virtual_disk_dict = { - 'id': 'Disk.Virtual.0:RAID.Integrated.1-1', - 'name': 'disk 0', - 'description': 'Virtual Disk 0 on Integrated RAID Controller 1', - 'controller': 'RAID.Integrated.1-1', - 'raid_level': '1', - 'size_mb': 571776, - 'status': 'ok', - 'raid_status': 'online', - 'span_depth': 1, - 'span_length': 2, - 'pending_operations': None, - 'physical_disks': []} - self.virtual_disk = 
test_utils.make_virtual_disk(virtual_disk_dict) - - physical_disk_dict = { - 'id': 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'description': ('Disk 1 in Backplane 1 of ' - 'Integrated RAID Controller 1'), - 'controller': 'RAID.Integrated.1-1', - 'manufacturer': 'SEAGATE', - 'model': 'ST600MM0006', - 'media_type': 'hdd', - 'interface_type': 'sas', - 'size_mb': 571776, - 'free_size_mb': 571776, - 'serial_number': 'S0M3EY2Z', - 'firmware_version': 'LS0A', - 'status': 'ok', - 'raid_status': 'ready', - 'sas_address': '500056B37789ABE3', - 'device_protocol': None} - self.physical_disk = test_utils.make_physical_disk(physical_disk_dict) - - def test_list_raid_controllers(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_raid_controllers.return_value = [self.raid_controller] - - raid_controllers = drac_raid.list_raid_controllers(self.node) - - mock_client.list_raid_controllers.assert_called_once_with() - self.assertEqual(self.raid_controller, raid_controllers[0]) - - def test_list_raid_controllers_fail(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - exc = exception.DracOperationError('boom') - mock_client.list_raid_controllers.side_effect = exc - - self.assertRaises(exception.DracOperationError, - drac_raid.list_raid_controllers, self.node) - - def test_list_virtual_disks(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_virtual_disks.return_value = [self.virtual_disk] - - virtual_disks = drac_raid.list_virtual_disks(self.node) - - mock_client.list_virtual_disks.assert_called_once_with() - self.assertEqual(self.virtual_disk, virtual_disks[0]) - - def test_list_virtual_disks_fail(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - exc = exception.DracOperationError('boom') - mock_client.list_virtual_disks.side_effect = exc - - self.assertRaises(exception.DracOperationError, - drac_raid.list_virtual_disks, self.node) - - def test_list_physical_disks(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_client.list_physical_disks.return_value = [self.physical_disk] - - physical_disks = drac_raid.list_physical_disks(self.node) - - mock_client.list_physical_disks.assert_called_once_with() - self.assertEqual(self.physical_disk, physical_disks[0]) - - def test_list_physical_disks_fail(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - exc = exception.DracOperationError('boom') - mock_client.list_physical_disks.side_effect = exc - - self.assertRaises(exception.DracOperationError, - drac_raid.list_physical_disks, self.node) - - -@mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) -class DracManageVirtualDisksTestCase(test_utils.BaseDracTest): - - def setUp(self): - super(DracManageVirtualDisksTestCase, self).setUp() - self.node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - def test_create_virtual_disk(self, mock_validate_job_queue, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - drac_raid.create_virtual_disk( - self.node, 'controller', ['disk1', 'disk2'], '1+0', 43008) - - 
mock_validate_job_queue.assert_called_once_with( - self.node, name_prefix='Config:RAID:controller') - mock_client.create_virtual_disk.assert_called_once_with( - 'controller', ['disk1', 'disk2'], '1+0', 43008, None, None, None) - - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - def test_create_virtual_disk_with_optional_attrs(self, - mock_validate_job_queue, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - drac_raid.create_virtual_disk( - self.node, 'controller', ['disk1', 'disk2'], '1+0', 43008, - disk_name='name', span_length=3, span_depth=2) - - mock_validate_job_queue.assert_called_once_with( - self.node, name_prefix='Config:RAID:controller') - mock_client.create_virtual_disk.assert_called_once_with( - 'controller', ['disk1', 'disk2'], '1+0', 43008, 'name', 3, 2) - - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - def test_create_virtual_disk_fail(self, mock_validate_job_queue, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - exc = drac_exceptions.BaseClientException('boom') - mock_client.create_virtual_disk.side_effect = exc - - self.assertRaises( - exception.DracOperationError, drac_raid.create_virtual_disk, - self.node, 'controller', ['disk1', 'disk2'], '1+0', 42) - - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - def test_delete_virtual_disk(self, mock_validate_job_queue, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - drac_raid.delete_virtual_disk(self.node, 'disk1') - - mock_validate_job_queue.assert_called_once_with(self.node) - mock_client.delete_virtual_disk.assert_called_once_with('disk1') - - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - def test_delete_virtual_disk_fail(self, mock_validate_job_queue, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - exc = drac_exceptions.BaseClientException('boom') - mock_client.delete_virtual_disk.side_effect = exc - - self.assertRaises( - exception.DracOperationError, drac_raid.delete_virtual_disk, - self.node, 'disk1') - - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - def test__reset_raid_config(self, mock_validate_job_queue, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - drac_raid._reset_raid_config( - self.node, 'controller') - - mock_validate_job_queue.assert_called_once_with( - self.node, name_prefix='Config:RAID:controller') - mock_client.reset_raid_config.assert_called_once_with( - 'controller') - - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - def test__reset_raid_config_fail(self, mock_validate_job_queue, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - exc = drac_exceptions.BaseClientException('boom') - mock_client.reset_raid_config.side_effect = exc - - self.assertRaises( - exception.DracOperationError, drac_raid._reset_raid_config, - self.node, 'RAID.Integrated.1-1') - - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - def test_clear_foreign_config(self, mock_validate_job_queue, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - drac_raid.clear_foreign_config( - self.node, 
'RAID.Integrated.1-1') - - mock_validate_job_queue.assert_called_once_with( - self.node, 'Config:RAID:RAID.Integrated.1-1') - mock_client.clear_foreign_config.assert_called_once_with( - 'RAID.Integrated.1-1') - - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - def test_clear_foreign_config_fail(self, mock_validate_job_queue, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - exc = drac_exceptions.BaseClientException('boom') - mock_client.clear_foreign_config.side_effect = exc - - self.assertRaises( - exception.DracOperationError, drac_raid.clear_foreign_config, - self.node, 'RAID.Integrated.1-1') - - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - def test_set_raid_settings(self, mock_validate_job_queue, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - controller_fqdd = "RAID.Integrated.1-1" - raid_cntrl_attr = "RAID.Integrated.1-1:RAIDRequestedControllerMode" - raid_settings = {raid_cntrl_attr: 'RAID'} - drac_raid.set_raid_settings(self.node, controller_fqdd, raid_settings) - - mock_validate_job_queue.assert_called_once_with( - self.node) - mock_client.set_raid_settings.assert_called_once_with( - controller_fqdd, raid_settings) - - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - def test_list_raid_settings(self, mock_validate_job_queue, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - drac_raid.list_raid_settings(self.node) - mock_validate_job_queue.assert_called_once_with( - self.node) - mock_client.list_raid_settings.assert_called_once_with() - - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - def test_change_physical_disk_state(self, - mock_validate_job_queue, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - controllers_to_physical_disk_ids = {'RAID.Integrated.1-1': [ - 'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1']} - expected_change_disk_state = { - 'is_reboot_required': True, - 'conversion_results': { - 'RAID.Integrated.1-1': {'is_reboot_required': 'optional', - 'is_commit_required': True}}, - 'commit_required_ids': ['RAID.Integrated.1-1']} - mode = constants.RaidStatus.raid - mock_client.change_physical_disk_state.return_value = \ - expected_change_disk_state - actual_change_disk_state = drac_raid.change_physical_disk_state( - self.node, - mode=mode, - controllers_to_physical_disk_ids=controllers_to_physical_disk_ids) - - mock_validate_job_queue.assert_called_once_with(self.node) - mock_client.change_physical_disk_state.assert_called_once_with( - mode, controllers_to_physical_disk_ids) - self.assertEqual(expected_change_disk_state, actual_change_disk_state) - - @mock.patch.object(drac_raid, 'change_physical_disk_state', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def test__change_physical_disk_mode(self, - mock_commit_config, - mock_change_physical_disk_state, - mock_get_drac_client): - mock_commit_config.return_value = '42' - mock_change_physical_disk_state.return_value = { - 'is_reboot_required': constants.RebootRequired.optional, - 'conversion_results': { - 'RAID.Integrated.1-1': { - 'is_reboot_required': constants.RebootRequired.optional, - 'is_commit_required': True}}, - 
'commit_required_ids': ['RAID.Integrated.1-1']} - - actual_change_disk_state = drac_raid._change_physical_disk_mode( - self.node, mode=constants.RaidStatus.raid) - self.assertEqual(['42'], - self.node.driver_internal_info['raid_config_job_ids']) - self.assertEqual('completed', - self.node.driver_internal_info['raid_config_substep']) - self.assertEqual( - ['RAID.Integrated.1-1'], - self.node.driver_internal_info['raid_config_parameters']) - mock_commit_config.assert_called_once_with( - self.node, raid_controller='RAID.Integrated.1-1', reboot=False, - realtime=True) - self.assertEqual(states.DEPLOYWAIT, actual_change_disk_state) - - def test_commit_config(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - drac_raid.commit_config(self.node, 'controller1') - - mock_client.commit_pending_raid_changes.assert_called_once_with( - raid_controller='controller1', reboot=False, realtime=False) - - def test_commit_config_with_reboot(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - drac_raid.commit_config(self.node, 'controller1', reboot=True, - realtime=False) - - mock_client.commit_pending_raid_changes.assert_called_once_with( - raid_controller='controller1', reboot=True, realtime=False) - - def test_commit_config_with_realtime(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - drac_raid.commit_config(self.node, 'RAID.Integrated.1-1', reboot=False, - realtime=True) - - mock_client.commit_pending_raid_changes.assert_called_once_with( - raid_controller='RAID.Integrated.1-1', reboot=False, realtime=True) - - def test_commit_config_fail(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - exc = drac_exceptions.BaseClientException('boom') - mock_client.commit_pending_raid_changes.side_effect = exc - - self.assertRaises( - exception.DracOperationError, drac_raid.commit_config, self.node, - 'controller1') - - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def test__commit_to_controllers_with_config_job(self, mock_commit_config, - mock_get_drac_client): - controllers = [{'is_reboot_required': 'true', - 'is_commit_required': True, - 'is_ehba_mode': False, - 'raid_controller': 'AHCI.Slot.3-1'}] - substep = "delete_foreign_config" - - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_commit_config.return_value = "42" - drac_raid._commit_to_controllers(self.node, - controllers=controllers, - substep=substep) - - self.assertEqual(1, mock_commit_config.call_count) - self.assertEqual(['42'], - self.node.driver_internal_info['raid_config_job_ids']) - self.assertEqual(substep, - self.node.driver_internal_info['raid_config_substep']) - - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def test__commit_to_controllers_without_config_job( - self, mock_commit_config, mock_get_drac_client): - controllers = [{'is_reboot_required': 'true', - 'is_commit_required': False, - 'raid_controller': 'AHCI.Slot.3-1'}] - substep = "delete_foreign_config" - - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_commit_config.return_value = None - drac_raid._commit_to_controllers(self.node, - controllers=controllers, - substep=substep) - - self.assertEqual(0, mock_commit_config.call_count) - self.assertNotIn('raid_config_job_ids', self.node.driver_internal_info) - 
self.assertEqual(substep, - self.node.driver_internal_info['raid_config_substep']) - - def test_abandon_config(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - drac_raid.abandon_config(self.node, 'controller1') - - mock_client.abandon_pending_raid_changes.assert_called_once_with( - 'controller1') - - def test_abandon_config_fail(self, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - exc = drac_exceptions.BaseClientException('boom') - mock_client.abandon_pending_raid_changes.side_effect = exc - - self.assertRaises( - exception.DracOperationError, drac_raid.abandon_config, self.node, - 'controller1') - - class DracCreateRaidConfigurationHelpersTestCase(test_utils.BaseDracTest): def setUp(self): @@ -549,1708 +101,6 @@ class DracCreateRaidConfigurationHelpersTestCase(test_utils.BaseDracTest): self.node.target_raid_config = self.target_raid_configuration self.node.save() - def _generate_physical_disks(self): - physical_disks = [] - - for disk in self.physical_disks: - physical_disks.append(test_utils.make_physical_disk(disk)) - - return physical_disks - - def test__filter_logical_disks_root_only(self): - logical_disks = drac_raid._filter_logical_disks( - self.target_raid_configuration['logical_disks'], True, False) - - self.assertEqual(1, len(logical_disks)) - self.assertEqual('root_volume', logical_disks[0]['volume_name']) - - def test__filter_logical_disks_nonroot_only(self): - logical_disks = drac_raid._filter_logical_disks( - self.target_raid_configuration['logical_disks'], False, True) - - self.assertEqual(2, len(logical_disks)) - self.assertEqual('data_volume1', logical_disks[0]['volume_name']) - self.assertEqual('data_volume2', logical_disks[1]['volume_name']) - - def test__filter_logical_disks_excelude_all(self): - logical_disks = drac_raid._filter_logical_disks( - self.target_raid_configuration['logical_disks'], False, False) - - self.assertEqual(0, len(logical_disks)) - - def test__calculate_spans_for_2_disk_and_raid_level_1(self): - raid_level = '1' - disks_count = 2 - - spans_count = drac_raid._calculate_spans(raid_level, disks_count) - self.assertEqual(1, spans_count) - - def test__calculate_spans_for_7_disk_and_raid_level_50(self): - raid_level = '5+0' - disks_count = 7 - - spans_count = drac_raid._calculate_spans(raid_level, disks_count) - - self.assertEqual(2, spans_count) - - def test__calculate_spans_for_7_disk_and_raid_level_10(self): - raid_level = '1+0' - disks_count = 7 - - spans_count = drac_raid._calculate_spans(raid_level, disks_count) - self.assertEqual(3, spans_count) - - def test__calculate_spans_for_invalid_raid_level(self): - raid_level = 'foo' - disks_count = 7 - - self.assertRaises(exception.DracOperationError, - drac_raid._calculate_spans, raid_level, disks_count) - - def test__max_volume_size_mb(self): - physical_disks = self._generate_physical_disks() - physical_disk_free_space_mb = {} - for disk in physical_disks: - physical_disk_free_space_mb[disk] = disk.free_size_mb - - max_size = drac_raid._max_volume_size_mb( - '5', physical_disks[0:3], physical_disk_free_space_mb) - - self.assertEqual(1143552, max_size) - - def test__volume_usage_per_disk_mb(self): - logical_disk = { - 'size_mb': 102400, - 'raid_level': '5', - 'disk_type': 'hdd', - 'interface_type': 'sas', - 'volume_name': 'data_volume1'} - physical_disks = self._generate_physical_disks() - - usage_per_disk = drac_raid._volume_usage_per_disk_mb(logical_disk, - physical_disks) - - 
self.assertEqual(14656, usage_per_disk) - - def test__find_configuration(self): - logical_disks = [ - {'size_mb': 102400, - 'raid_level': '5', - 'is_root_volume': True, - 'disk_type': 'hdd'} - ] - physical_disks = self._generate_physical_disks() - expected_contoller = 'RAID.Integrated.1-1' - expected_physical_disk_ids = [ - 'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1'] - - logical_disks = drac_raid._find_configuration(logical_disks, - physical_disks, - False) - - self.assertEqual(expected_contoller, - logical_disks[0]['controller']) - self.assertEqual(expected_physical_disk_ids, - logical_disks[0]['physical_disks']) - - def test__find_configuration_with_more_than_min_disks_for_raid_level(self): - logical_disks = [ - {'size_mb': 3072000, - 'raid_level': '5', - 'is_root_volume': True, - 'disk_type': 'hdd'} - ] - physical_disks = self._generate_physical_disks() - expected_contoller = 'RAID.Integrated.1-1' - expected_physical_disk_ids = [ - 'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1'] - - logical_disks = drac_raid._find_configuration(logical_disks, - physical_disks, - False) - - self.assertEqual(expected_contoller, - logical_disks[0]['controller']) - self.assertEqual(expected_physical_disk_ids, - logical_disks[0]['physical_disks']) - - def test__find_configuration_all_steps(self): - logical_disks = [ - # step 1 - {'size_mb': 102400, - 'raid_level': '1', - 'physical_disks': [ - 'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1']}, - # step 2 - {'size_mb': 51200, - 'raid_level': '5'}, - # step 3 - {'size_mb': 'MAX', - 'raid_level': '0', - 'physical_disks': [ - 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1']}, - ] - physical_disks = self._generate_physical_disks() - - logical_disks = drac_raid._find_configuration(logical_disks, - physical_disks, - False) - - self.assertEqual(3, len(logical_disks)) - # step 1 - self.assertIn( - {'raid_level': '1', - 'size_mb': 102400, - 'controller': 'RAID.Integrated.1-1', - 'span_depth': 1, - 'span_length': 2, - 'physical_disks': [ - 'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1']}, - logical_disks) - # step 2 - self.assertIn( - {'raid_level': '5', - 'size_mb': 51200, - 'controller': 'RAID.Integrated.1-1', - 'span_depth': 1, - 'span_length': 3, - 'physical_disks': [ - 'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1']}, - logical_disks) - # step 3 - self.assertIn( - {'raid_level': '0', - 'size_mb': 1143552, - 'controller': 'RAID.Integrated.1-1', - 'span_depth': 1, - 'span_length': 2, - 'physical_disks': [ - 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1']}, - logical_disks) - - def test__find_configuration_pending_delete(self): - logical_disks = [ - {'size_mb': 102400, - 'raid_level': '5', - 
'is_root_volume': True, - 'disk_type': 'hdd'} - ] - physical_disks = self._generate_physical_disks() - # No free space, but deletion pending means they're still usable. - physical_disks = [disk._replace(free_size_mb=0) - for disk in physical_disks] - - expected_contoller = 'RAID.Integrated.1-1' - expected_physical_disk_ids = [ - 'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1'] - - logical_disks = drac_raid._find_configuration(logical_disks, - physical_disks, - True) - - self.assertEqual(expected_contoller, - logical_disks[0]['controller']) - self.assertEqual(expected_physical_disk_ids, - logical_disks[0]['physical_disks']) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) - def test__validate_volume_size_requested_more_than_actual_size( - self, mock_list_physical_disks, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - self.logical_disk = { - 'physical_disks': [ - 'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1'], - 'raid_level': '1+0', 'is_root_volume': True, - 'size_mb': 102400000, - 'controller': 'RAID.Integrated.1-1'} - - self.logical_disks = [self.logical_disk.copy()] - self.target_raid_configuration = {'logical_disks': self.logical_disks} - self.node.target_raid_config = self.target_raid_configuration - self.node.save() - - physical_disks = self._generate_physical_disks() - mock_list_physical_disks.return_value = physical_disks - - processed_logical_disks = drac_raid._validate_volume_size( - self.node, self.node.target_raid_config['logical_disks']) - - self.assertEqual(2287104, processed_logical_disks[0]['size_mb']) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) - def test__validate_volume_size_requested_less_than_actual_size( - self, mock_list_physical_disks, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - self.logical_disk = { - 'physical_disks': [ - 'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.6:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.7:Enclosure.Internal.0-1:RAID.Integrated.1-1'], - 'raid_level': '1+0', 'is_root_volume': True, - 'size_mb': 204800, - 'controller': 'RAID.Integrated.1-1'} - - self.logical_disks = [self.logical_disk.copy()] - self.target_raid_configuration = {'logical_disks': self.logical_disks} - self.node.target_raid_config = self.target_raid_configuration - self.node.save() - - physical_disks = self._generate_physical_disks() - mock_list_physical_disks.return_value = 
physical_disks - - processed_logical_disks = drac_raid._validate_volume_size( - self.node, self.node.target_raid_config['logical_disks']) - - self.assertEqual(self.logical_disk, processed_logical_disks[0]) - - -class DracRaidInterfaceTestCase(test_utils.BaseDracTest): - - def setUp(self): - super(DracRaidInterfaceTestCase, self).setUp() - self.node = obj_utils.create_test_node(self.context, - driver='idrac', - driver_info=INFO_DICT) - - self.physical_disk = { - 'id': 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'description': ('Disk 1 in Backplane 1 of ' - 'Integrated RAID Controller 1'), - 'controller': 'RAID.Integrated.1-1', - 'manufacturer': 'SEAGATE', - 'model': 'ST600MM0006', - 'media_type': 'hdd', - 'interface_type': 'sas', - 'size_mb': 571776, - 'free_size_mb': 571776, - 'serial_number': 'S0M3EY2Z', - 'firmware_version': 'LS0A', - 'status': 'ok', - 'raid_status': 'ready', - 'sas_address': '500056B37789ABE3', - 'device_protocol': None} - - self.physical_disks = [] - for i in range(8): - disk = self.physical_disk.copy() - disk['id'] = ('Disk.Bay.%s:Enclosure.Internal.0-1:' - 'RAID.Integrated.1-1' % i) - disk['serial_number'] = 'serial%s' % i - - self.physical_disks.append(disk) - - self.root_logical_disk = { - 'size_gb': 50, - 'raid_level': '1', - 'disk_type': 'hdd', - 'interface_type': 'sas', - 'volume_name': 'root_volume', - 'is_root_volume': True - } - self.nonroot_logical_disks = [ - {'size_gb': 100, - 'raid_level': '5', - 'disk_type': 'hdd', - 'interface_type': 'sas', - 'volume_name': 'data_volume1'}, - {'size_gb': 100, - 'raid_level': '5', - 'disk_type': 'hdd', - 'interface_type': 'sas', - 'volume_name': 'data_volume2'} - ] - - self.logical_disks = ( - [self.root_logical_disk] + self.nonroot_logical_disks) - self.target_raid_configuration = {'logical_disks': self.logical_disks} - self.node.target_raid_config = self.target_raid_configuration - self.node.clean_step = {'foo': 'bar'} - self.node.save() - - def _generate_physical_disks(self): - physical_disks = [] - - for disk in self.physical_disks: - physical_disks.append(test_utils.make_physical_disk(disk)) - - return physical_disks - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, '_reset_raid_config', autospec=True) - @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'change_physical_disk_state', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def _test_create_configuration( - self, expected_state, - mock_commit_config, - mock_change_physical_disk_state, - mock_validate_job_queue, - mock_list_physical_disks, - mock__reset_raid_config, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - physical_disks = self._generate_physical_disks() - mock_list_physical_disks.return_value = physical_disks - - raid_controller_dict = { - 'id': 'RAID.Integrated.1-1', - 'description': 'Integrated RAID Controller 1', - 'manufacturer': 'DELL', - 'model': 'PERC H710 Mini', - 'primary_status': 'ok', - 'firmware_version': '21.3.0-0009', - 'bus': '1', - 'supports_realtime': True} - raid_controller = test_utils.make_raid_controller( - raid_controller_dict) - mock_client.list_raid_controllers.return_value = [raid_controller] - mock__reset_raid_config.return_value = { - 'is_reboot_required': constants.RebootRequired.optional, - 
'is_commit_required': True} - mock_change_physical_disk_state.return_value = { - 'is_reboot_required': constants.RebootRequired.optional, - 'conversion_results': { - 'RAID.Integrated.1-1': { - 'is_reboot_required': constants.RebootRequired.optional, - 'is_commit_required': True}}, - 'commit_required_ids': ['RAID.Integrated.1-1']} - mock_commit_config.side_effect = ['42'] - next_substep = "create_virtual_disks" - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - return_value = task.driver.raid.create_configuration( - task, create_root_volume=True, create_nonroot_volumes=False) - - mock_commit_config.assert_called_with( - task.node, raid_controller='RAID.Integrated.1-1', reboot=False, - realtime=True) - - self.assertEqual(expected_state, return_value) - self.assertEqual(1, mock_commit_config.call_count) - self.assertEqual(1, mock_change_physical_disk_state.call_count) - - self.node.refresh() - self.assertEqual(True, - task.node.driver_internal_info[ - 'volume_validation']) - self.assertEqual(next_substep, - task.node.driver_internal_info[ - 'raid_config_substep']) - self.assertEqual(['42'], - task.node.driver_internal_info[ - 'raid_config_job_ids']) - - def test_create_configuration_in_clean(self): - self._test_create_configuration(states.CLEANWAIT) - - def test_create_configuration_in_deploy(self): - self.node.clean_step = None - self.node.save() - self._test_create_configuration(states.DEPLOYWAIT) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, '_reset_raid_config', autospec=True) - @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'change_physical_disk_state', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def test_create_configuration_without_drives_conversion( - self, mock_commit_config, - mock_change_physical_disk_state, - mock_validate_job_queue, mock_list_physical_disks, - mock__reset_raid_config, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - physical_disks = self._generate_physical_disks() - mock_list_physical_disks.return_value = physical_disks - - raid_controller_dict = { - 'id': 'RAID.Integrated.1-1', - 'description': 'Integrated RAID Controller 1', - 'manufacturer': 'DELL', - 'model': 'PERC H710 Mini', - 'primary_status': 'ok', - 'firmware_version': '21.3.0-0009', - 'bus': '1', - 'supports_realtime': True} - raid_controller = test_utils.make_raid_controller( - raid_controller_dict) - mock_client.list_raid_controllers.return_value = [raid_controller] - mock__reset_raid_config.return_value = { - 'is_reboot_required': constants.RebootRequired.false, - 'is_commit_required': True} - mock_change_physical_disk_state.return_value = { - 'is_reboot_required': constants.RebootRequired.false, - 'conversion_results': { - 'RAID.Integrated.1-1': { - 'is_reboot_required': constants.RebootRequired.false, - 'is_commit_required': False}}, - 'commit_required_ids': ['RAID.Integrated.1-1']} - mock_client.create_virtual_disk.return_value = { - 'is_reboot_required': constants.RebootRequired.optional, - 'is_commit_required': True} - mock_commit_config.side_effect = ['42'] - next_substep = "completed" - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - return_value = task.driver.raid.create_configuration( - 
task, create_root_volume=True, create_nonroot_volumes=False) - - mock_commit_config.assert_called_with( - task.node, raid_controller='RAID.Integrated.1-1', reboot=False, - realtime=True) - - self.assertEqual(states.CLEANWAIT, return_value) - self.assertEqual(1, mock_commit_config.call_count) - self.assertEqual(1, mock_change_physical_disk_state.call_count) - self.assertEqual(1, mock_client.create_virtual_disk.call_count) - - self.node.refresh() - self.assertEqual(False, - task.node.driver_internal_info[ - 'volume_validation']) - self.assertEqual(next_substep, - task.node.driver_internal_info[ - 'raid_config_substep']) - self.assertEqual(['42'], - task.node.driver_internal_info[ - 'raid_config_job_ids']) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) - @mock.patch.object(drac_raid, 'change_physical_disk_state', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def test_create_configuration_no_change( - self, mock_commit_config, - mock_change_physical_disk_state, - mock_list_physical_disks, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - physical_disks = self._generate_physical_disks() - mock_list_physical_disks.return_value = physical_disks - mock_change_physical_disk_state.return_value = { - 'is_reboot_required': constants.RebootRequired.optional, - 'conversion_results': { - 'RAID.Integrated.1-1': { - 'is_reboot_required': constants.RebootRequired.false, - 'is_commit_required': False}}, - 'commit_required_ids': ['RAID.Integrated.1-1']} - mock_commit_config.return_value = '42' - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - return_value = task.driver.raid.create_configuration( - task, create_root_volume=False, create_nonroot_volumes=False, - delete_existing=False) - - self.assertEqual(False, - task.node.driver_internal_info[ - 'volume_validation']) - self.assertEqual(0, mock_client.create_virtual_disk.call_count) - self.assertEqual(0, mock_commit_config.call_count) - - self.assertIsNone(return_value) - - self.node.refresh() - self.assertNotIn('raid_config_job_ids', self.node.driver_internal_info) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, '_reset_raid_config', autospec=True) - @mock.patch.object(drac_raid, 'list_virtual_disks', autospec=True) - @mock.patch.object(drac_raid, 'list_raid_settings', autospec=True) - @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) - @mock.patch.object(drac_raid, 'change_physical_disk_state', spec_set=True, - autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def test_create_configuration_delete_existing( - self, mock_commit_config, - mock_validate_job_queue, - mock_change_physical_disk_state, - mock_list_physical_disks, - mock_list_raid_settings, - mock_list_virtual_disks, - mock__reset_raid_config, - mock_get_drac_client): - self.node.clean_step = None - self.node.save() - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - physical_disks = self._generate_physical_disks() - raid_controller_dict = { - 'id': 'RAID.Integrated.1-1', - 'description': 'Integrated RAID Controller 1', - 'manufacturer': 'DELL', - 'model': 'PERC H710 Mini', - 
'primary_status': 'ok', - 'firmware_version': '21.3.0-0009', - 'bus': '1', - 'supports_realtime': True} - raid_controller = test_utils.make_raid_controller( - raid_controller_dict) - - raid_attr = "RAID.Integrated.1-1:RAIDCurrentControllerMode" - raid_controller_config = { - 'id': 'RAID.Integrated.1-1:RAIDCurrentControllerMode', - 'current_value': ['RAID'], - 'read_only': True, - 'name': 'RAIDCurrentControllerMode', - 'possible_values': ['RAID', 'Enhanced HBA']} - raid_cntrl_settings = { - raid_attr: test_utils.create_raid_setting(raid_controller_config)} - - mock_list_raid_settings.return_value = raid_cntrl_settings - mock_list_physical_disks.return_value = physical_disks - mock_commit_config.side_effect = ['12'] - mock_client.list_raid_controllers.return_value = [raid_controller] - mock__reset_raid_config.return_value = { - 'is_reboot_required': constants.RebootRequired.optional, - 'is_commit_required': True} - - mock_change_physical_disk_state.return_value = { - 'is_reboot_required': constants.RebootRequired.optional, - 'conversion_results': { - 'RAID.Integrated.1-1': { - 'is_reboot_required': constants.RebootRequired.optional, - 'is_commit_required': True}}, - 'commit_required_ids': ['RAID.Integrated.1-1']} - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - return_value = task.driver.raid.create_configuration( - task, create_root_volume=True, create_nonroot_volumes=False, - delete_existing=True) - - self.assertEqual(True, - task.node.driver_internal_info[ - 'volume_validation']) - mock_commit_config.assert_called_with( - task.node, raid_controller='RAID.Integrated.1-1', - realtime=True, reboot=False) - - self.assertEqual(1, mock_commit_config.call_count) - - self.assertEqual(states.DEPLOYWAIT, return_value) - self.node.refresh() - self.assertEqual(['12'], - self.node.driver_internal_info['raid_config_job_ids']) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'change_physical_disk_state', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def test_create_configuration_with_nested_raid_level( - self, mock_commit_config, mock_change_physical_disk_state, - mock_validate_job_queue, mock_list_physical_disks, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - self.root_logical_disk = { - 'size_gb': 100, - 'raid_level': '5+0', - 'is_root_volume': True - } - - self.logical_disks = [self.root_logical_disk] - self.target_raid_configuration = {'logical_disks': self.logical_disks} - self.node.target_raid_config = self.target_raid_configuration - self.node.save() - - physical_disks = self._generate_physical_disks() - mock_list_physical_disks.return_value = physical_disks - mock_commit_config.side_effect = ['42'] - mock_change_physical_disk_state.return_value = { - 'is_reboot_required': constants.RebootRequired.optional, - 'conversion_results': { - 'RAID.Integrated.1-1': { - 'is_reboot_required': constants.RebootRequired.optional, - 'is_commit_required': True}}, - 'commit_required_ids': ['RAID.Integrated.1-1']} - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - task.driver.raid.create_configuration( - task, create_root_volume=True, create_nonroot_volumes=True, - delete_existing=False) - - 
self.assertEqual(True, - task.node.driver_internal_info[ - 'volume_validation']) - # Commits to the controller - mock_commit_config.assert_called_with( - mock.ANY, raid_controller='RAID.Integrated.1-1', reboot=False, - realtime=True) - - self.assertEqual(1, mock_commit_config.call_count) - self.assertEqual(1, mock_change_physical_disk_state.call_count) - - self.node.refresh() - self.assertEqual(['42'], - self.node.driver_internal_info['raid_config_job_ids']) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'change_physical_disk_state', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def test_create_configuration_with_nested_raid_10( - self, mock_commit_config, mock_change_physical_disk_state, - mock_validate_job_queue, mock_list_physical_disks, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - self.root_logical_disk = { - 'size_gb': 100, - 'raid_level': '1+0', - 'is_root_volume': True - } - self.logical_disks = [self.root_logical_disk] - self.target_raid_configuration = {'logical_disks': self.logical_disks} - self.node.target_raid_config = self.target_raid_configuration - self.node.save() - - physical_disks = self._generate_physical_disks() - mock_list_physical_disks.return_value = physical_disks - - mock_commit_config.side_effect = ['42'] - mock_change_physical_disk_state.return_value = { - 'is_reboot_required': constants.RebootRequired.optional, - 'conversion_results': { - 'RAID.Integrated.1-1': { - 'is_reboot_required': constants.RebootRequired.optional, - 'is_commit_required': True}}, - 'commit_required_ids': ['RAID.Integrated.1-1']} - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - task.driver.raid.create_configuration( - task, create_root_volume=True, create_nonroot_volumes=True, - delete_existing=False) - - self.assertEqual(True, - task.node.driver_internal_info[ - 'volume_validation']) - # Commits to the controller - mock_commit_config.assert_called_with( - mock.ANY, raid_controller='RAID.Integrated.1-1', reboot=False, - realtime=True) - - self.assertEqual(1, mock_commit_config.call_count) - self.assertEqual(1, mock_change_physical_disk_state.call_count) - - self.node.refresh() - self.assertEqual(['42'], - self.node.driver_internal_info['raid_config_job_ids']) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'change_physical_disk_state', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def test_create_configuration_with_multiple_controllers( - self, mock_commit_config, mock_change_physical_disk_state, - mock_validate_job_queue, mock_list_physical_disks, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - self.physical_disks[0]['controller'] = 'controller-2' - self.physical_disks[1]['controller'] = 'controller-2' - physical_disks = self._generate_physical_disks() - mock_list_physical_disks.return_value = physical_disks - - mock_commit_config.side_effect = ['42'] - 
- mock_change_physical_disk_state.return_value = { - 'is_reboot_required': constants.RebootRequired.optional, - 'conversion_results': { - 'RAID.Integrated.1-1': { - 'is_reboot_required': constants.RebootRequired.optional, - 'is_commit_required': True}}, - 'commit_required_ids': ['RAID.Integrated.1-1']} - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - task.driver.raid.create_configuration( - task, create_root_volume=True, create_nonroot_volumes=True, - delete_existing=False) - - self.assertEqual(True, - task.node.driver_internal_info[ - 'volume_validation']) - self.node.refresh() - self.assertEqual(['42'], - self.node.driver_internal_info['raid_config_job_ids']) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'change_physical_disk_state', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def test_create_configuration_with_backing_physical_disks( - self, mock_commit_config, mock_change_physical_disk_state, - mock_validate_job_queue, mock_list_physical_disks, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - self.root_logical_disk['physical_disks'] = [ - 'Disk.Bay.3:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1'] - self.logical_disks = ( - [self.root_logical_disk] + self.nonroot_logical_disks) - self.target_raid_configuration = {'logical_disks': self.logical_disks} - self.node.target_raid_config = self.target_raid_configuration - self.node.save() - - physical_disks = self._generate_physical_disks() - mock_list_physical_disks.return_value = physical_disks - - mock_commit_config.side_effect = ['42'] - mock_change_physical_disk_state.return_value = { - 'is_reboot_required': constants.RebootRequired.optional, - 'conversion_results': { - 'RAID.Integrated.1-1': { - 'is_reboot_required': constants.RebootRequired.optional, - 'is_commit_required': True}}, - 'commit_required_ids': ['RAID.Integrated.1-1']} - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - task.driver.raid.create_configuration( - task, create_root_volume=True, create_nonroot_volumes=True, - delete_existing=False) - - self.assertEqual(True, - task.node.driver_internal_info[ - 'volume_validation']) - # Commits to the controller - mock_commit_config.assert_called_with( - mock.ANY, raid_controller='RAID.Integrated.1-1', - reboot=False, realtime=True) - - self.node.refresh() - self.assertEqual(['42'], - self.node.driver_internal_info['raid_config_job_ids']) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'change_physical_disk_state', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def test_create_configuration_with_predefined_number_of_physical_disks( - self, mock_commit_config, mock_change_physical_disk_state, - mock_validate_job_queue, mock_list_physical_disks, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - 
self.root_logical_disk['raid_level'] = '0' - self.root_logical_disk['number_of_physical_disks'] = 3 - self.logical_disks = ( - [self.root_logical_disk, self.nonroot_logical_disks[0]]) - self.target_raid_configuration = {'logical_disks': self.logical_disks} - self.node.target_raid_config = self.target_raid_configuration - self.node.save() - - physical_disks = self._generate_physical_disks() - mock_list_physical_disks.return_value = physical_disks - mock_commit_config.side_effect = ['42'] - mock_change_physical_disk_state.return_value = { - 'is_reboot_required': constants.RebootRequired.optional, - 'conversion_results': { - 'RAID.Integrated.1-1': { - 'is_reboot_required': constants.RebootRequired.optional, - 'is_commit_required': True}}, - 'commit_required_ids': ['RAID.Integrated.1-1']} - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - task.driver.raid.create_configuration( - task, create_root_volume=True, create_nonroot_volumes=True, - delete_existing=False) - - self.assertEqual(True, - task.node.driver_internal_info[ - 'volume_validation']) - # Commits to the controller - mock_commit_config.assert_called_with( - mock.ANY, raid_controller='RAID.Integrated.1-1', - reboot=False, realtime=True) - - self.node.refresh() - self.assertEqual(['42'], - self.node.driver_internal_info['raid_config_job_ids']) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'change_physical_disk_state', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def test_create_configuration_with_max_size( - self, mock_commit_config, mock_change_physical_disk_state, - mock_validate_job_queue, mock_list_physical_disks, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - self.root_logical_disk = { - 'size_gb': 'MAX', - 'raid_level': '1', - 'physical_disks': [ - 'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1'], - 'is_root_volume': True - } - self.logical_disks = ([self.root_logical_disk] - + self.nonroot_logical_disks) - self.target_raid_configuration = {'logical_disks': self.logical_disks} - self.node.target_raid_config = self.target_raid_configuration - self.node.save() - - physical_disks = self._generate_physical_disks() - mock_list_physical_disks.return_value = physical_disks - - mock_commit_config.side_effect = ['12'] - mock_change_physical_disk_state.return_value = { - 'is_reboot_required': constants.RebootRequired.optional, - 'conversion_results': { - 'RAID.Integrated.1-1': { - 'is_reboot_required': constants.RebootRequired.optional, - 'is_commit_required': True}}, - 'commit_required_ids': ['RAID.Integrated.1-1']} - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - task.driver.raid.create_configuration( - task, create_root_volume=True, create_nonroot_volumes=True, - delete_existing=False) - - self.assertEqual(True, - task.node.driver_internal_info[ - 'volume_validation']) - # Commits to the controller - mock_commit_config.assert_called_with( - mock.ANY, raid_controller='RAID.Integrated.1-1', reboot=False, - realtime=True) - - self.node.refresh() - self.assertEqual(['12'], - self.node.driver_internal_info['raid_config_job_ids']) - - 
@mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) - def test_create_configuration_with_max_size_without_backing_disks( - self, mock_list_physical_disks, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - self.root_logical_disk = { - 'size_gb': 'MAX', - 'raid_level': '1', - 'is_root_volume': True - } - self.logical_disks = [self.root_logical_disk] - self.target_raid_configuration = {'logical_disks': self.logical_disks} - self.node.target_raid_config = self.target_raid_configuration - self.node.save() - - self.physical_disks = self.physical_disks[0:2] - physical_disks = self._generate_physical_disks() - mock_list_physical_disks.return_value = physical_disks - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - self.assertRaises( - exception.InvalidParameterValue, - task.driver.raid.create_configuration, - task) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'change_physical_disk_state', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def test_create_configuration_with_share_physical_disks( - self, mock_commit_config, mock_change_physical_disk_state, - mock_validate_job_queue, mock_list_physical_disks, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - self.nonroot_logical_disks[0]['share_physical_disks'] = True - self.nonroot_logical_disks[1]['share_physical_disks'] = True - self.logical_disks = self.nonroot_logical_disks - self.target_raid_configuration = {'logical_disks': self.logical_disks} - self.node.target_raid_config = self.target_raid_configuration - self.node.save() - - self.physical_disks = self.physical_disks[0:3] - physical_disks = self._generate_physical_disks() - mock_list_physical_disks.return_value = physical_disks - - mock_commit_config.side_effect = ['42'] - mock_change_physical_disk_state.return_value = { - 'is_reboot_required': constants.RebootRequired.optional, - 'conversion_results': { - 'RAID.Integrated.1-1': { - 'is_reboot_required': constants.RebootRequired.optional, - 'is_commit_required': True}}, - 'commit_required_ids': ['RAID.Integrated.1-1']} - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - task.driver.raid.create_configuration( - task, create_root_volume=True, create_nonroot_volumes=True, - delete_existing=False) - - self.assertEqual(True, - task.node.driver_internal_info[ - 'volume_validation']) - # Commits to the controller - mock_commit_config.assert_called_with( - mock.ANY, raid_controller='RAID.Integrated.1-1', reboot=False, - realtime=True) - - self.node.refresh() - self.assertEqual(['42'], - self.node.driver_internal_info['raid_config_job_ids']) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, '_reset_raid_config', autospec=True) - @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def 
test_create_configuration_fails_with_sharing_disabled( - self, mock_commit_config, mock_validate_job_queue, - mock_list_physical_disks, mock__reset_raid_config, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - self.nonroot_logical_disks[0]['share_physical_disks'] = False - self.nonroot_logical_disks[1]['share_physical_disks'] = False - self.logical_disks = self.nonroot_logical_disks - self.target_raid_configuration = {'logical_disks': self.logical_disks} - self.node.target_raid_config = self.target_raid_configuration - self.node.save() - - self.physical_disks = self.physical_disks[0:3] - physical_disks = self._generate_physical_disks() - mock_list_physical_disks.return_value = physical_disks - raid_controller_dict = { - 'id': 'RAID.Integrated.1-1', - 'description': 'Integrated RAID Controller 1', - 'manufacturer': 'DELL', - 'model': 'PERC H710 Mini', - 'primary_status': 'ok', - 'firmware_version': '21.3.0-0009', - 'bus': '1', - 'supports_realtime': True} - raid_controller = test_utils.make_raid_controller( - raid_controller_dict) - mock_client.list_raid_controllers.return_value = [raid_controller] - mock_commit_config.return_value = '42' - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - self.assertRaises( - exception.DracOperationError, - task.driver.raid.create_configuration, - task, create_root_volume=True, create_nonroot_volumes=True) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'change_physical_disk_state', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def test_create_configuration_with_max_size_and_share_physical_disks( - self, mock_commit_config, mock_change_physical_disk_state, - mock_validate_job_queue, mock_list_physical_disks, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - self.nonroot_logical_disks[0]['share_physical_disks'] = True - self.nonroot_logical_disks[0]['size_gb'] = 'MAX' - self.nonroot_logical_disks[0]['physical_disks'] = [ - 'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1'] - self.nonroot_logical_disks[1]['share_physical_disks'] = True - self.logical_disks = self.nonroot_logical_disks - self.target_raid_configuration = {'logical_disks': self.logical_disks} - self.node.target_raid_config = self.target_raid_configuration - self.node.save() - - self.physical_disks = self.physical_disks[0:3] - physical_disks = self._generate_physical_disks() - mock_list_physical_disks.return_value = physical_disks - - mock_commit_config.side_effect = ['42'] - mock_change_physical_disk_state.return_value = { - 'is_reboot_required': constants.RebootRequired.optional, - 'conversion_results': { - 'RAID.Integrated.1-1': { - 'is_reboot_required': constants.RebootRequired.optional, - 'is_commit_required': True}}, - 'commit_required_ids': ['RAID.Integrated.1-1']} - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - task.driver.raid.create_configuration( - task, create_root_volume=True, create_nonroot_volumes=True, - delete_existing=False) - - self.assertEqual(True, - 
task.node.driver_internal_info[ - 'volume_validation']) - # Commits to the controller - mock_commit_config.assert_called_with( - mock.ANY, raid_controller='RAID.Integrated.1-1', reboot=False, - realtime=True) - - self.node.refresh() - self.assertEqual(['42'], self.node.driver_internal_info[ - 'raid_config_job_ids']) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def test_create_configuration_with_multiple_max_and_sharing_same_disks( - self, mock_commit_config, mock_validate_job_queue, - mock_list_physical_disks, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - self.nonroot_logical_disks[0]['share_physical_disks'] = True - self.nonroot_logical_disks[0]['size_gb'] = 'MAX' - self.nonroot_logical_disks[0]['physical_disks'] = [ - 'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1'] - self.nonroot_logical_disks[1]['share_physical_disks'] = True - self.nonroot_logical_disks[1]['size_gb'] = 'MAX' - self.nonroot_logical_disks[1]['physical_disks'] = [ - 'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1'] - self.logical_disks = self.nonroot_logical_disks - self.target_raid_configuration = {'logical_disks': self.logical_disks} - self.node.target_raid_config = self.target_raid_configuration - self.node.save() - - self.physical_disks = self.physical_disks[0:3] - physical_disks = self._generate_physical_disks() - mock_list_physical_disks.return_value = physical_disks - - mock_commit_config.return_value = '42' - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - self.assertRaises( - exception.DracOperationError, - task.driver.raid.create_configuration, - task, create_root_volume=True, create_nonroot_volumes=True, - delete_existing=False) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, '_reset_raid_config', autospec=True) - @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def test_create_configuration_fails_if_not_enough_space( - self, mock_commit_config, - mock_validate_job_queue, mock_list_physical_disks, - mock__reset_raid_config, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - self.logical_disk = { - 'size_gb': 500, - 'raid_level': '1' - } - self.logical_disks = [self.logical_disk, self.logical_disk] - self.target_raid_configuration = {'logical_disks': self.logical_disks} - self.node.target_raid_config = self.target_raid_configuration - self.node.save() - - self.physical_disks = self.physical_disks[0:3] - physical_disks = self._generate_physical_disks() - mock_list_physical_disks.return_value = physical_disks - raid_controller_dict = { - 'id': 'RAID.Integrated.1-1', - 'description': 'Integrated RAID Controller 1', - 'manufacturer': 'DELL', - 'model': 'PERC H710 Mini', - 'primary_status': 
'ok', - 'firmware_version': '21.3.0-0009', - 'bus': '1', - 'supports_realtime': True} - raid_controller = test_utils.make_raid_controller( - raid_controller_dict) - mock_client.list_raid_controllers.return_value = [raid_controller] - mock__reset_raid_config.return_value = { - 'is_reboot_required': constants.RebootRequired.optional, - 'is_commit_required': True} - - mock_commit_config.return_value = '42' - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - self.assertRaises( - exception.DracOperationError, - task.driver.raid.create_configuration, - task, create_root_volume=True, create_nonroot_volumes=True) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, '_reset_raid_config', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'list_physical_disks', autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def test_create_configuration_fails_if_disk_already_reserved( - self, mock_commit_config, - mock_validate_job_queue, mock_list_physical_disks, - mock__reset_raid_config, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - self.logical_disk = { - 'size_gb': 500, - 'raid_level': '1', - 'physical_disks': [ - 'Disk.Bay.4:Enclosure.Internal.0-1:RAID.Integrated.1-1', - 'Disk.Bay.5:Enclosure.Internal.0-1:RAID.Integrated.1-1'], - } - self.logical_disks = [self.logical_disk, self.logical_disk.copy()] - self.target_raid_configuration = {'logical_disks': self.logical_disks} - self.node.target_raid_config = self.target_raid_configuration - self.node.save() - - physical_disks = self._generate_physical_disks() - mock_list_physical_disks.return_value = physical_disks - - raid_controller_dict = { - 'id': 'RAID.Integrated.1-1', - 'description': 'Integrated RAID Controller 1', - 'manufacturer': 'DELL', - 'model': 'PERC H710 Mini', - 'primary_status': 'ok', - 'firmware_version': '21.3.0-0009', - 'bus': '1', - 'supports_realtime': True} - raid_controller = test_utils.make_raid_controller( - raid_controller_dict) - mock_client.list_raid_controllers.return_value = [raid_controller] - - mock_commit_config.return_value = '42' - mock__reset_raid_config.return_value = { - 'is_reboot_required': constants.RebootRequired.optional, - 'is_commit_required': True} - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - self.assertRaises( - exception.DracOperationError, - task.driver.raid.create_configuration, - task, create_root_volume=True, create_nonroot_volumes=True) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, '_reset_raid_config', autospec=True) - @mock.patch.object(drac_raid, 'list_raid_controllers', autospec=True) - @mock.patch.object(drac_raid, 'list_raid_settings', autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def _test_delete_configuration(self, expected_state, - mock_commit_config, - mock_validate_job_queue, - mock_list_raid_settings, - mock_list_raid_controllers, - mock__reset_raid_config, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - raid_attr = "RAID.Integrated.1-1:RAIDCurrentControllerMode" - raid_controller_config = { - 
'id': 'RAID.Integrated.1-1:RAIDCurrentControllerMode', - 'current_value': ['RAID'], - 'read_only': True, - 'name': 'RAIDCurrentControllerMode', - 'possible_values': ['RAID', 'Enhanced HBA']} - - raid_cntrl_settings = { - raid_attr: test_utils.create_raid_setting(raid_controller_config)} - - raid_controller_dict = { - 'id': 'RAID.Integrated.1-1', - 'description': 'Integrated RAID Controller 1', - 'manufacturer': 'DELL', - 'model': 'PERC H710 Mini', - 'primary_status': 'ok', - 'firmware_version': '21.3.0-0009', - 'bus': '1', - 'supports_realtime': True} - - mock_list_raid_controllers.return_value = [ - test_utils.make_raid_controller(raid_controller_dict)] - mock_list_raid_settings.return_value = raid_cntrl_settings - mock_commit_config.return_value = '42' - mock__reset_raid_config.return_value = { - 'is_reboot_required': constants.RebootRequired.optional, - 'is_commit_required': True} - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - return_value = task.driver.raid.delete_configuration(task) - - mock_commit_config.assert_called_once_with( - task.node, raid_controller='RAID.Integrated.1-1', reboot=False, - realtime=True) - - self.assertEqual(expected_state, return_value) - self.node.refresh() - self.assertEqual(['42'], - self.node.driver_internal_info['raid_config_job_ids']) - - def test_delete_configuration_in_clean(self): - self._test_delete_configuration(states.CLEANWAIT) - - def test_delete_configuration_in_deploy(self): - self.node.clean_step = None - self.node.save() - self._test_delete_configuration(states.DEPLOYWAIT) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'list_raid_controllers', autospec=True) - @mock.patch.object(drac_raid, 'list_raid_settings', autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, '_reset_raid_config', spec_set=True, - autospec=True) - def test_delete_configuration_with_mix_realtime_controller_in_raid_mode( - self, mock__reset_raid_config, mock_commit_config, - mock_validate_job_queue, mock_list_raid_settings, - mock_list_raid_controllers, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - expected_raid_config_params = ['AHCI.Slot.3-1', 'RAID.Integrated.1-1'] - mix_controllers = [{'id': 'AHCI.Slot.3-1', - 'description': 'AHCI controller in slot 3', - 'manufacturer': 'DELL', - 'model': 'BOSS-S1', - 'primary_status': 'unknown', - 'firmware_version': '2.5.13.3016', - 'bus': '5E', - 'supports_realtime': False}, - {'id': 'RAID.Integrated.1-1', - 'description': 'Integrated RAID Controller 1', - 'manufacturer': 'DELL', - 'model': 'PERC H740 Mini', - 'primary_status': 'unknown', - 'firmware_version': '50.5.0-1750', - 'bus': '3C', - 'supports_realtime': True}] - - mock_list_raid_controllers.return_value = [ - test_utils.make_raid_controller(controller) for - controller in mix_controllers] - - raid_controller_config = [ - {'id': 'AHCI.Slot.3-1:RAIDCurrentControllerMode', - 'current_value': ['RAID'], - 'read_only': True, - 'name': 'RAIDCurrentControllerMode', - 'possible_values': ['RAID', 'Enhanced HBA']}, - {'id': 'RAID.Integrated.1-1:RAIDCurrentControllerMode', - 'current_value': ['RAID'], - 'read_only': True, - 'name': 'RAIDCurrentControllerMode', - 'possible_values': ['RAID', 'Enhanced HBA']}] - - raid_settings = defaultdict() - for sett in 
raid_controller_config: - raid_settings[sett.get('id')] = test_utils.create_raid_setting( - sett) - - mock_list_raid_settings.return_value = raid_settings - - mock_commit_config.side_effect = ['42', '12'] - mock__reset_raid_config.side_effect = [{ - 'is_reboot_required': constants.RebootRequired.optional, - 'is_commit_required': True - }, { - 'is_reboot_required': constants.RebootRequired.true, - 'is_commit_required': True - }] - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - return_value = task.driver.raid.delete_configuration(task) - - mock_commit_config.assert_has_calls( - [mock.call(mock.ANY, raid_controller='AHCI.Slot.3-1', - reboot=False, realtime=False), - mock.call(mock.ANY, raid_controller='RAID.Integrated.1-1', - reboot=True, realtime=False)], - any_order=True) - - self.assertEqual(states.CLEANWAIT, return_value) - self.node.refresh() - self.assertEqual(expected_raid_config_params, - self.node.driver_internal_info[ - 'raid_config_parameters']) - self.assertEqual(['42', '12'], - self.node.driver_internal_info['raid_config_job_ids']) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'list_raid_controllers', autospec=True) - @mock.patch.object(drac_raid, 'list_raid_settings', autospec=True) - @mock.patch.object(drac_job, 'list_unfinished_jobs', autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'set_raid_settings', autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, '_reset_raid_config', spec_set=True, - autospec=True) - def test_delete_configuration_with_mix_realtime_controller_in_ehba_mode( - self, mock__reset_raid_config, mock_commit_config, - mock_set_raid_settings, mock_validate_job_queue, - mock_list_unfinished_jobs, mock_list_raid_settings, - mock_list_raid_controllers, mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - expected_raid_config_params = ['RAID.Integrated.1-1', 'AHCI.Slot.3-1'] - mix_controllers = [{'id': 'RAID.Integrated.1-1', - 'description': 'Integrated RAID Controller 1', - 'manufacturer': 'DELL', - 'model': 'PERC H740 Mini', - 'primary_status': 'unknown', - 'firmware_version': '50.5.0-1750', - 'bus': '3C', - 'supports_realtime': True}, - {'id': 'AHCI.Slot.3-1', - 'description': 'AHCI controller in slot 3', - 'manufacturer': 'DELL', - 'model': 'BOSS-S1', - 'primary_status': 'unknown', - 'firmware_version': '2.5.13.3016', - 'bus': '5E', - 'supports_realtime': False}] - - mock_list_raid_controllers.return_value = [ - test_utils.make_raid_controller(controller) for - controller in mix_controllers] - raid_controller_config = [ - {'id': 'RAID.Integrated.1-1:RAIDCurrentControllerMode', - 'current_value': ['Enhanced HBA'], - 'read_only': True, - 'name': 'RAIDCurrentControllerMode', - 'possible_values': ['RAID', 'Enhanced HBA']}, - {'id': 'AHCI.Slot.3-1:RAIDCurrentControllerMode', - 'current_value': ['RAID'], - 'read_only': True, - 'name': 'RAIDCurrentControllerMode', - 'possible_values': ['RAID', 'Enhanced HBA']}] - - raid_settings = defaultdict() - for sett in raid_controller_config: - raid_settings[sett.get('id')] = test_utils.create_raid_setting( - sett) - - mock_list_raid_settings.return_value = raid_settings - mock_list_unfinished_jobs.return_value = [] - mock_commit_config.side_effect = ['42', '12', '13'] - mock__reset_raid_config.side_effect = [{ - 
'is_reboot_required': constants.RebootRequired.optional, - 'is_commit_required': True - }, { - 'is_reboot_required': constants.RebootRequired.true, - 'is_commit_required': True - }] - mock_set_raid_settings.return_value = { - 'is_reboot_required': constants.RebootRequired.true, - 'is_commit_required': True} - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - return_value = task.driver.raid.delete_configuration(task) - mock_commit_config.assert_has_calls( - [mock.call(mock.ANY, raid_controller='RAID.Integrated.1-1', - reboot=False, realtime=True), - mock.call(mock.ANY, raid_controller='AHCI.Slot.3-1', - reboot=False, realtime=False), - mock.call(mock.ANY, raid_controller='RAID.Integrated.1-1', - reboot=True, realtime=False)], - any_order=True) - - self.assertEqual(states.CLEANWAIT, return_value) - self.node.refresh() - self.assertEqual(expected_raid_config_params, - self.node.driver_internal_info[ - 'raid_config_parameters']) - self.assertEqual(['42', '12', '13'], - self.node.driver_internal_info['raid_config_job_ids']) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'list_raid_controllers', autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def test_delete_configuration_no_change(self, mock_commit_config, - mock_validate_job_queue, - mock_list_raid_controllers, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - mock_list_raid_controllers.return_value = [] - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - return_value = task.driver.raid.delete_configuration(task) - - self.assertEqual(0, mock_client._reset_raid_config.call_count) - self.assertEqual(0, mock_commit_config.call_count) - - self.assertIsNone(return_value) - - self.node.refresh() - self.assertNotIn('raid_config_job_ids', self.node.driver_internal_info) - - @mock.patch.object(drac_raid, 'list_virtual_disks', autospec=True) - def test_get_logical_disks(self, mock_list_virtual_disks): - virtual_disk_dict = { - 'id': 'Disk.Virtual.0:RAID.Integrated.1-1', - 'name': 'disk 0', - 'description': 'Virtual Disk 0 on Integrated RAID Controller 1', - 'controller': 'RAID.Integrated.1-1', - 'raid_level': '1', - 'size_mb': 571776, - 'status': 'ok', - 'raid_status': 'online', - 'span_depth': 1, - 'span_length': 2, - 'pending_operations': None, - 'physical_disks': []} - mock_list_virtual_disks.return_value = [ - test_utils.make_virtual_disk(virtual_disk_dict)] - expected_logical_disk = {'id': 'Disk.Virtual.0:RAID.Integrated.1-1', - 'size_gb': 558, - 'raid_level': '1', - 'name': 'disk 0', - 'controller': 'RAID.Integrated.1-1'} - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - props = task.driver.raid.get_logical_disks(task) - - self.assertEqual({'logical_disks': [expected_logical_disk]}, - props) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'clear_foreign_config', spec_set=True, - autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - def test__execute_foreign_drives_with_no_foreign_drives( - self, mock_validate_job_queue, - mock_clear_foreign_config, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - 
raid_config_params = ['RAID.Integrated.1-1'] - raid_config_substep = 'clear_foreign_config' - driver_internal_info = self.node.driver_internal_info - driver_internal_info['raid_config_parameters'] = raid_config_params - driver_internal_info['raid_config_substep'] = raid_config_substep - self.node.driver_internal_info = driver_internal_info - self.node.save() - mock_clear_foreign_config.return_value = { - 'is_reboot_required': constants.RebootRequired.false, - 'is_commit_required': False - } - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - return_value = task.driver.raid._execute_foreign_drives( - task, self.node) - - self.assertIsNone(return_value) - - @mock.patch.object(drac_common, 'get_drac_client', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'clear_foreign_config', spec_set=True, - autospec=True) - @mock.patch.object(drac_job, 'validate_job_queue', spec_set=True, - autospec=True) - @mock.patch.object(drac_raid, 'commit_config', spec_set=True, - autospec=True) - def test__execute_foreign_drives_with_foreign_drives( - self, mock_commit_config, - mock_validate_job_queue, - mock_clear_foreign_config, - mock_get_drac_client): - mock_client = mock.Mock() - mock_get_drac_client.return_value = mock_client - - raid_config_params = ['RAID.Integrated.1-1'] - raid_config_substep = 'clear_foreign_config' - driver_internal_info = self.node.driver_internal_info - driver_internal_info['raid_config_parameters'] = raid_config_params - driver_internal_info['raid_config_substep'] = raid_config_substep - self.node.driver_internal_info = driver_internal_info - self.node.save() - mock_clear_foreign_config.return_value = { - 'is_reboot_required': constants.RebootRequired.optional, - 'is_commit_required': True - } - mock_commit_config.return_value = '42' - - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - return_value = task.driver.raid._execute_foreign_drives( - task, self.node) - - self.assertEqual(states.CLEANWAIT, return_value) - - self.assertEqual(['42'], - self.node.driver_internal_info['raid_config_job_ids']) - self.assertEqual('physical_disk_conversion', - self.node.driver_internal_info['raid_config_substep']) - self.assertEqual( - ['RAID.Integrated.1-1'], - self.node.driver_internal_info['raid_config_parameters']) - mock_commit_config.assert_called_once_with( - self.node, raid_controller='RAID.Integrated.1-1', reboot=False, - realtime=True) - - @mock.patch.object(base.RAIDInterface, 'apply_configuration', - autospec=True) - def test_apply_configuration(self, mock_apply_configuration): - with task_manager.acquire(self.context, self.node.uuid, - shared=False) as task: - task.driver.raid.apply_configuration( - task, self.target_raid_configuration, - create_root_volume=False, create_nonroot_volumes=True, - delete_existing=False) - - mock_apply_configuration.assert_called_once_with( - task.driver.raid, task, - self.target_raid_configuration, False, True, False) - class DracRedfishRAIDTestCase(test_utils.BaseDracTest): diff --git a/ironic/tests/unit/drivers/modules/drac/utils.py b/ironic/tests/unit/drivers/modules/drac/utils.py index bc248b2377..e04e30ca24 100644 --- a/ironic/tests/unit/drivers/modules/drac/utils.py +++ b/ironic/tests/unit/drivers/modules/drac/utils.py @@ -13,17 +13,12 @@ import collections -from oslo_utils import importutils - from ironic.tests.unit.db import base as db_base from ironic.tests.unit.db import utils as db_utils INFO_DICT = db_utils.get_test_drac_info() -dracclient_job = 
importutils.try_import('dracclient.resources.job') -dracclient_raid = importutils.try_import('dracclient.resources.raid') - class BaseDracTest(db_base.DbTestCase): def setUp(self): @@ -31,14 +26,15 @@ class BaseDracTest(db_base.DbTestCase): self.config(enabled_hardware_types=['idrac', 'fake-hardware'], enabled_boot_interfaces=[ 'idrac-redfish-virtual-media', 'fake'], - enabled_power_interfaces=['idrac-wsman', 'fake'], - enabled_management_interfaces=['idrac-wsman', 'fake'], + enabled_power_interfaces=['idrac-redfish', 'fake'], + enabled_management_interfaces=['idrac-redfish', 'fake'], enabled_inspect_interfaces=[ - 'idrac-wsman', 'fake', 'no-inspect'], + 'idrac-redfish', 'fake', 'no-inspect'], enabled_vendor_interfaces=[ - 'idrac-wsman', 'fake', 'no-vendor'], - enabled_raid_interfaces=['idrac-wsman', 'fake', 'no-raid'], - enabled_bios_interfaces=['idrac-wsman', 'no-bios']) + 'idrac-redfish', 'fake', 'no-vendor'], + enabled_raid_interfaces=['idrac-redfish', 'fake', + 'no-raid'], + enabled_bios_interfaces=['idrac-redfish', 'no-bios']) class DictToObj(object): @@ -74,30 +70,6 @@ def dict_of_object(data): return data -def make_job(job_dict): - tuple_class = dracclient_job.Job if dracclient_job else None - return dict_to_namedtuple(values=job_dict, - tuple_class=tuple_class) - - -def make_raid_controller(raid_controller_dict): - tuple_class = dracclient_raid.RAIDController if dracclient_raid else None - return dict_to_namedtuple(values=raid_controller_dict, - tuple_class=tuple_class) - - -def make_virtual_disk(virtual_disk_dict): - tuple_class = dracclient_raid.VirtualDisk if dracclient_raid else None - return dict_to_namedtuple(values=virtual_disk_dict, - tuple_class=tuple_class) - - -def make_physical_disk(physical_disk_dict): - tuple_class = dracclient_raid.PhysicalDisk if dracclient_raid else None - return dict_to_namedtuple(values=physical_disk_dict, - tuple_class=tuple_class) - - def create_raid_setting(raid_settings_dict): """Returns the raid configuration tuple object""" return dict_to_namedtuple(values=raid_settings_dict) diff --git a/ironic/tests/unit/drivers/test_drac.py b/ironic/tests/unit/drivers/test_drac.py index 6af1c2de67..60e7cccee5 100644 --- a/ironic/tests/unit/drivers/test_drac.py +++ b/ironic/tests/unit/drivers/test_drac.py @@ -34,21 +34,17 @@ class IDRACHardwareTestCase(db_base.DbTestCase): self.config(enabled_hardware_types=['idrac'], enabled_boot_interfaces=[ 'idrac-redfish-virtual-media', 'ipxe', 'pxe'], - enabled_management_interfaces=[ - 'idrac', 'idrac-redfish', 'idrac-wsman'], - enabled_power_interfaces=[ - 'idrac', 'idrac-redfish', 'idrac-wsman'], + enabled_management_interfaces=['idrac-redfish'], + enabled_power_interfaces=['idrac-redfish'], enabled_inspect_interfaces=[ - 'idrac', 'idrac-redfish', 'idrac-wsman', 'inspector', + 'idrac-redfish', 'inspector', 'no-inspect'], enabled_network_interfaces=['flat', 'neutron', 'noop'], enabled_raid_interfaces=[ - 'idrac', 'idrac-wsman', 'idrac-redfish', 'no-raid', + 'idrac-redfish', 'no-raid', 'agent'], - enabled_vendor_interfaces=[ - 'idrac', 'idrac-wsman', 'no-vendor'], - enabled_bios_interfaces=[ - 'idrac-wsman', 'idrac-redfish', 'no-bios']) + enabled_vendor_interfaces=['idrac-redfish', 'no-vendor'], + enabled_bios_interfaces=['idrac-redfish', 'no-bios']) def _validate_interfaces(self, driver, **kwargs): self.assertIsInstance( @@ -59,14 +55,14 @@ class IDRACHardwareTestCase(db_base.DbTestCase): kwargs.get('deploy', agent.AgentDeploy)) self.assertIsInstance( driver.management, - kwargs.get('management', 
drac.management.DracWSManManagement)) + kwargs.get('management', drac.management.DracRedfishManagement)) self.assertIsInstance( driver.power, - kwargs.get('power', drac.power.DracWSManPower)) + kwargs.get('power', drac.power.DracRedfishPower)) self.assertIsInstance( driver.bios, - kwargs.get('bios', drac.bios.DracWSManBIOS)) + kwargs.get('bios', drac.bios.DracRedfishBIOS)) self.assertIsInstance( driver.console, @@ -74,7 +70,7 @@ class IDRACHardwareTestCase(db_base.DbTestCase): self.assertIsInstance( driver.inspect, - kwargs.get('inspect', drac.inspect.DracWSManInspect)) + kwargs.get('inspect', drac.inspect.DracRedfishInspect)) self.assertIsInstance( driver.network, @@ -82,7 +78,7 @@ class IDRACHardwareTestCase(db_base.DbTestCase): self.assertIsInstance( driver.raid, - kwargs.get('raid', drac.raid.DracWSManRAID)) + kwargs.get('raid', drac.raid.DracRedfishRAID)) self.assertIsInstance( driver.storage, @@ -90,7 +86,8 @@ class IDRACHardwareTestCase(db_base.DbTestCase): self.assertIsInstance( driver.vendor, - kwargs.get('vendor', drac.vendor_passthru.DracWSManVendorPassthru)) + kwargs.get('vendor', + drac.vendor_passthru.DracRedfishVendorPassthru)) def test_default_interfaces(self): node = obj_utils.create_test_node(self.context, driver='idrac') @@ -130,22 +127,6 @@ class IDRACHardwareTestCase(db_base.DbTestCase): self._validate_interfaces(task.driver, vendor=noop.NoVendor) - def test_override_with_idrac(self): - node = obj_utils.create_test_node(self.context, driver='idrac', - management_interface='idrac', - power_interface='idrac', - inspect_interface='idrac', - raid_interface='idrac', - vendor_interface='idrac') - with task_manager.acquire(self.context, node.id) as task: - self._validate_interfaces( - task.driver, - management=drac.management.DracManagement, - power=drac.power.DracPower, - inspect=drac.inspect.DracInspect, - raid=drac.raid.DracRAID, - vendor=drac.vendor_passthru.DracVendorPassthru) - def test_override_with_redfish_management_and_power(self): node = obj_utils.create_test_node(self.context, driver='idrac', management_interface='idrac-redfish', diff --git a/ironic/tests/unit/drivers/third_party_driver_mock_specs.py b/ironic/tests/unit/drivers/third_party_driver_mock_specs.py index e6c429b95c..adac0f748f 100644 --- a/ironic/tests/unit/drivers/third_party_driver_mock_specs.py +++ b/ironic/tests/unit/drivers/third_party_driver_mock_specs.py @@ -17,36 +17,6 @@ """This module provides mock 'specs' for third party modules that can be used when needing to mock those third party modules""" -# python-dracclient -DRACCLIENT_SPEC = ( - 'client', - 'constants', - 'exceptions', -) - -DRACCLIENT_CLIENT_MOD_SPEC = ( - 'DRACClient', -) - -DRACCLIENT_CONSTANTS_MOD_SPEC = ( - 'POWER_OFF', - 'POWER_ON', - 'REBOOT', - 'RebootRequired', - 'RaidStatus' -) - -DRACCLIENT_CONSTANTS_REBOOT_REQUIRED_MOD_SPEC = ( - 'true', - 'optional', - 'false' -) - -DRACCLIENT_CONSTANTS_RAID_STATUS_MOD_SPEC = ( - 'jbod', - 'raid' -) - # sushy_oem_idrac SUSHY_OEM_IDRAC_MOD_SPEC = ( 'PHYSICAL_DISK_STATE_MODE_RAID', diff --git a/ironic/tests/unit/drivers/third_party_driver_mocks.py b/ironic/tests/unit/drivers/third_party_driver_mocks.py index ddaf240160..a6abd86710 100644 --- a/ironic/tests/unit/drivers/third_party_driver_mocks.py +++ b/ironic/tests/unit/drivers/third_party_driver_mocks.py @@ -26,7 +26,6 @@ Current list of mocked libraries: - proliantutils - pysnmp - scciclient -- python-dracclient - sushy_oem_idrac """ @@ -79,50 +78,6 @@ if not redfish: if 'ironic.drivers.redfish' in sys.modules: 
importlib.reload(sys.modules['ironic.drivers.modules.redfish']) -# attempt to load the external 'python-dracclient' library, which is required -# by the optional drivers.modules.drac module -dracclient = importutils.try_import('dracclient') -if not dracclient: - dracclient = mock.MagicMock(spec_set=mock_specs.DRACCLIENT_SPEC) - dracclient.client = mock.MagicMock( - spec_set=mock_specs.DRACCLIENT_CLIENT_MOD_SPEC) - dracclient.constants = mock.MagicMock( - spec_set=mock_specs.DRACCLIENT_CONSTANTS_MOD_SPEC, - POWER_OFF=mock.sentinel.POWER_OFF, - POWER_ON=mock.sentinel.POWER_ON, - REBOOT=mock.sentinel.REBOOT) - dracclient.constants.RebootRequired = mock.MagicMock( - spec_set=mock_specs.DRACCLIENT_CONSTANTS_REBOOT_REQUIRED_MOD_SPEC, - true=mock.sentinel.true, - optional=mock.sentinel.optional, - false=mock.sentinel.false) - dracclient.constants.RaidStatus = mock.MagicMock( - spec_set=mock_specs.DRACCLIENT_CONSTANTS_RAID_STATUS_MOD_SPEC, - jbod=mock.sentinel.jbod, - raid=mock.sentinel.raid) - - sys.modules['dracclient'] = dracclient - sys.modules['dracclient.client'] = dracclient.client - sys.modules['dracclient.constants'] = dracclient.constants - sys.modules['dracclient.exceptions'] = dracclient.exceptions - dracclient.exceptions.BaseClientException = type('BaseClientException', - (Exception,), {}) - - dracclient.exceptions.DRACRequestFailed = type( - 'DRACRequestFailed', (dracclient.exceptions.BaseClientException,), {}) - - class DRACOperationFailed(dracclient.exceptions.DRACRequestFailed): - def __init__(self, **kwargs): - super(DRACOperationFailed, self).__init__( - 'DRAC operation failed. Messages: %(drac_messages)s' % kwargs) - - dracclient.exceptions.DRACOperationFailed = DRACOperationFailed - - # Now that the external library has been mocked, if anything had already - # loaded any of the drivers, reload them. - if 'ironic.drivers.modules.drac' in sys.modules: - importlib.reload(sys.modules['ironic.drivers.modules.drac']) - sushy_oem_idrac = importutils.try_import('sushy_oem_idrac') if not sushy_oem_idrac: raidmode = mock.sentinel.PHYSICAL_DISK_STATE_MODE_RAID diff --git a/releasenotes/notes/remove-deprecated-idrac-wsman-c31861acd8742eb9.yaml b/releasenotes/notes/remove-deprecated-idrac-wsman-c31861acd8742eb9.yaml new file mode 100644 index 0000000000..4a9ab79180 --- /dev/null +++ b/releasenotes/notes/remove-deprecated-idrac-wsman-c31861acd8742eb9.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + The deprecated ``idrac-wsman`` and related ``idrac`` interface + aliases have been removed from the ``idrac`` hardware type. 
diff --git a/setup.cfg b/setup.cfg index a2369e7e8e..412d33a257 100644 --- a/setup.cfg +++ b/setup.cfg @@ -63,7 +63,6 @@ ironic.dhcp = ironic.hardware.interfaces.bios = fake = ironic.drivers.modules.fake:FakeBIOS idrac-redfish = ironic.drivers.modules.drac.bios:DracRedfishBIOS - idrac-wsman = ironic.drivers.modules.drac.bios:DracWSManBIOS ilo = ironic.drivers.modules.ilo.bios:IloBIOS irmc = ironic.drivers.modules.irmc.bios:IRMCBIOS no-bios = ironic.drivers.modules.noop:NoBIOS @@ -108,9 +107,7 @@ ironic.hardware.interfaces.firmware = ironic.hardware.interfaces.inspect = agent = ironic.drivers.modules.inspector:AgentInspect fake = ironic.drivers.modules.fake:FakeInspect - idrac = ironic.drivers.modules.drac.inspect:DracInspect idrac-redfish = ironic.drivers.modules.drac.inspect:DracRedfishInspect - idrac-wsman = ironic.drivers.modules.drac.inspect:DracWSManInspect ilo = ironic.drivers.modules.ilo.inspect:IloInspect inspector = ironic.drivers.modules.inspector:Inspector irmc = ironic.drivers.modules.irmc.inspect:IRMCInspect @@ -119,9 +116,7 @@ ironic.hardware.interfaces.inspect = ironic.hardware.interfaces.management = fake = ironic.drivers.modules.fake:FakeManagement - idrac = ironic.drivers.modules.drac.management:DracManagement idrac-redfish = ironic.drivers.modules.drac.management:DracRedfishManagement - idrac-wsman = ironic.drivers.modules.drac.management:DracWSManManagement ilo = ironic.drivers.modules.ilo.management:IloManagement ilo5 = ironic.drivers.modules.ilo.management:Ilo5Management intel-ipmitool = ironic.drivers.modules.intel_ipmi.management:IntelIPMIManagement @@ -138,9 +133,7 @@ ironic.hardware.interfaces.network = ironic.hardware.interfaces.power = agent = ironic.drivers.modules.agent_power:AgentPower fake = ironic.drivers.modules.fake:FakePower - idrac = ironic.drivers.modules.drac.power:DracPower idrac-redfish = ironic.drivers.modules.drac.power:DracRedfishPower - idrac-wsman = ironic.drivers.modules.drac.power:DracWSManPower ilo = ironic.drivers.modules.ilo.power:IloPower ipmitool = ironic.drivers.modules.ipmitool:IPMIPower irmc = ironic.drivers.modules.irmc.power:IRMCPower @@ -150,9 +143,7 @@ ironic.hardware.interfaces.power = ironic.hardware.interfaces.raid = agent = ironic.drivers.modules.agent:AgentRAID fake = ironic.drivers.modules.fake:FakeRAID - idrac = ironic.drivers.modules.drac.raid:DracRAID idrac-redfish = ironic.drivers.modules.drac.raid:DracRedfishRAID - idrac-wsman = ironic.drivers.modules.drac.raid:DracWSManRAID ilo5 = ironic.drivers.modules.ilo.raid:Ilo5RAID irmc = ironic.drivers.modules.irmc.raid:IRMCRAID no-raid = ironic.drivers.modules.noop:NoRAID @@ -171,8 +162,6 @@ ironic.hardware.interfaces.storage = ironic.hardware.interfaces.vendor = fake = ironic.drivers.modules.fake:FakeVendorB - idrac = ironic.drivers.modules.drac.vendor_passthru:DracVendorPassthru - idrac-wsman = ironic.drivers.modules.drac.vendor_passthru:DracWSManVendorPassthru idrac-redfish = ironic.drivers.modules.drac.vendor_passthru:DracRedfishVendorPassthru ilo = ironic.drivers.modules.ilo.vendor:VendorPassthru irmc = ironic.drivers.modules.irmc.vendor:IRMCVendorPassthru