Remove dependency on ironic-lib
ironic-lib is being retired, so this change imports the code IPA actually uses from ironic-lib into this tree and updates all references accordingly. It also changes how we raise exceptions: the imported ironic-lib code is aligned with the IPA practice of deriving every exception from RESTError, which in turn lets us remove the code for serializing ironic-lib exceptions.

Change-Id: I137340ce6820c68d8e0f1a32668151bba7b1ddd7
parent 06077cb88e
commit 8b18184e2d
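
The crux of the exception change, in miniature: callers previously had to handle both IPA's RESTError hierarchy and ironic_lib.exception.IronicException; now everything derives from RESTError. A sketch under that assumption (RESTError is simplified here; the real class in ironic_python_agent/errors.py also carries an HTTP status code):

    # Sketch only: a simplified RESTError showing the single-base-class idea.
    class RESTError(Exception):
        """Base class for errors generated in ironic-python-agent."""
        message = 'An error occurred'

        def __init__(self, details=None):
            self.details = details or self.message
            super().__init__(self.details)


    class ServiceLookupFailure(RESTError):
        """Error raised when an mdns service lookup fails."""

        def __init__(self, service="unknown"):
            details = f"Cannot find {service} service through multicast."
            self.message = details
            super().__init__(details)


    try:
        raise ServiceLookupFailure('baremetal')
    except RESTError as exc:  # one except clause now covers everything
        print(exc)  # Cannot find baremetal service through multicast.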
Changed files:

ironic_python_agent/
    agent.py, burnin.py, device_hints.py, disk_partitioner.py,
    disk_utils.py, encoding.py, errors.py, extensions/, hardware.py,
    hardware_managers/nvidia/, inject_files.py, inspect.py, inspector.py,
    mdns.py, metrics_lib/, partition_utils.py, qemu_img.py, raid_utils.py,
    utils.py
ironic_python_agent/tests/unit/
    base.py, extensions/, metrics_lib/, test_agent.py, test_base.py,
    test_burnin.py, test_device_hints.py, test_disk_partitioner.py,
    test_disk_utils.py, test_encoding.py, test_hardware.py,
    test_inspector.py, test_mdns.py, test_partition_utils.py,
    test_qemu_img.py, test_raid_utils.py, test_utils.py
requirements.txt

ironic_python_agent/agent.py

@@ -22,8 +22,6 @@ import time
 from urllib import parse as urlparse
 
 import eventlet
-from ironic_lib import exception as lib_exc
-from ironic_lib import mdns
 from oslo_concurrency import processutils
 from oslo_config import cfg
 from oslo_log import log
@@ -36,6 +34,7 @@ from ironic_python_agent.extensions import base
 from ironic_python_agent import hardware
 from ironic_python_agent import inspector
 from ironic_python_agent import ironic_api_client
+from ironic_python_agent import mdns
 from ironic_python_agent import netutils
 from ironic_python_agent import utils
 
@@ -48,9 +47,6 @@ NETWORK_WAIT_TIMEOUT = 60
 # Time(in seconds) to wait before reattempt
 NETWORK_WAIT_RETRY = 5
 
-cfg.CONF.import_group('metrics', 'ironic_lib.metrics_utils')
-cfg.CONF.import_group('metrics_statsd', 'ironic_lib.metrics_statsd')
-
 Host = collections.namedtuple('Host', ['hostname', 'port'])
 
 
@@ -213,7 +209,7 @@ class IronicPythonAgent(base.ExecuteCommandMixin):
         if (not api_url or api_url == 'mdns') and not standalone:
             try:
                 api_url, params = mdns.get_endpoint('baremetal')
-            except lib_exc.ServiceLookupFailure:
+            except errors.ServiceLookupFailure:
                 if api_url:
                     # mDNS explicitly requested, report failure.
                     raise
ironic_python_agent/burnin.py

@@ -14,13 +14,13 @@ import json
 import socket
 import time
 
-from ironic_lib import utils
 from oslo_concurrency import processutils
 from oslo_log import log
 from tooz import coordination
 
 from ironic_python_agent import errors
 from ironic_python_agent import hardware
+from ironic_python_agent import utils
 
 LOG = log.getLogger(__name__)
ironic_python_agent/device_hints.py (new file, 314 lines)

@@ -0,0 +1,314 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import re
from urllib import parse as urlparse

from oslo_log import log as logging
from oslo_utils import specs_matcher
from oslo_utils import strutils
from oslo_utils import units


LOG = logging.getLogger(__name__)


# A dictionary in the form {hint name: hint type}
VALID_ROOT_DEVICE_HINTS = {
    'size': int, 'model': str, 'wwn': str, 'serial': str, 'vendor': str,
    'wwn_with_extension': str, 'wwn_vendor_extension': str, 'name': str,
    'rotational': bool, 'hctl': str, 'by_path': str,
}

ROOT_DEVICE_HINTS_GRAMMAR = specs_matcher.make_grammar()


def _extract_hint_operator_and_values(hint_expression, hint_name):
    """Extract the operator and value(s) of a root device hint expression.

    A root device hint expression could contain one or more values
    depending on the operator. This method extracts the operator and
    value(s) and returns a dictionary containing both.

    :param hint_expression: The hint expression string containing value(s)
                            and operator (optionally).
    :param hint_name: The name of the hint. Used for logging.
    :raises: ValueError if the hint_expression is empty.
    :returns: A dictionary containing:

        :op: The operator. An empty string in case of None.
        :values: A list of values stripped and converted to lowercase.
    """
    expression = str(hint_expression).strip().lower()
    if not expression:
        raise ValueError(f'Root device hint {hint_name} expression is empty')

    # parseString() returns a list of tokens which the operator (if
    # present) is always the first element.
    ast = ROOT_DEVICE_HINTS_GRAMMAR.parseString(expression)
    if len(ast) <= 1:
        # hint_expression had no operator
        return {'op': '', 'values': [expression]}

    op = ast[0]
    return {'values': [v.strip() for v in re.split(op, expression) if v],
            'op': op}


def _normalize_hint_expression(hint_expression, hint_name):
    """Normalize a string type hint expression.

    A string-type hint expression contains one or more operators and
    one or more values: [<op>] <value> [<op> <value>]*. This normalizes
    the values by url-encoding white spaces and special characters. The
    operators are not normalized. For example: the hint value of "<or>
    foo bar <or> bar" will become "<or> foo%20bar <or> bar".

    :param hint_expression: The hint expression string containing value(s)
                            and operator (optionally).
    :param hint_name: The name of the hint. Used for logging.
    :raises: ValueError if the hint_expression is empty.
    :returns: A normalized string.
    """
    hdict = _extract_hint_operator_and_values(hint_expression, hint_name)
    result = hdict['op'].join([' %s ' % urlparse.quote(t)
                               for t in hdict['values']])
    return (hdict['op'] + result).strip()


def _append_operator_to_hints(root_device):
    """Add an equal (s== or ==) operator to the hints.

    For backwards compatibility, for root device hints where no operator
    means equal, this method adds the equal operator to the hint. This is
    needed when using oslo.utils.specs_matcher methods.

    :param root_device: The root device hints dictionary.
    """
    for name, expression in root_device.items():
        # NOTE(lucasagomes): The specs_matcher from oslo.utils does not
        # support boolean, so we don't need to append any operator
        # for it.
        if VALID_ROOT_DEVICE_HINTS[name] is bool:
            continue

        expression = str(expression)
        ast = ROOT_DEVICE_HINTS_GRAMMAR.parseString(expression)
        if len(ast) > 1:
            continue

        op = 's== %s' if VALID_ROOT_DEVICE_HINTS[name] is str else '== %s'
        root_device[name] = op % expression

    return root_device


def parse_root_device_hints(root_device):
    """Parse the root_device property of a node.

    Parses and validates the root_device property of a node. These are
    hints for how a node's root device is created. The 'size' hint
    should be a positive integer. The 'rotational' hint should be a
    Boolean value.

    :param root_device: the root_device dictionary from the node's property.
    :returns: a dictionary with the root device hints parsed or
              None if there are no hints.
    :raises: ValueError, if some information is invalid.
    """
    if not root_device:
        return

    root_device = copy.deepcopy(root_device)

    invalid_hints = set(root_device) - set(VALID_ROOT_DEVICE_HINTS)
    if invalid_hints:
        raise ValueError('The hints "%(invalid_hints)s" are invalid. '
                         'Valid hints are: "%(valid_hints)s"' %
                         {'invalid_hints': ', '.join(invalid_hints),
                          'valid_hints': ', '.join(VALID_ROOT_DEVICE_HINTS)})

    for name, expression in root_device.items():
        hint_type = VALID_ROOT_DEVICE_HINTS[name]
        if hint_type is str:
            if not isinstance(expression, str):
                raise ValueError(
                    'Root device hint "%(name)s" is not a string value. '
                    'Hint expression: %(expression)s' %
                    {'name': name, 'expression': expression})
            root_device[name] = _normalize_hint_expression(expression, name)

        elif hint_type is int:
            for v in _extract_hint_operator_and_values(expression,
                                                       name)['values']:
                try:
                    integer = int(v)
                except ValueError:
                    raise ValueError(
                        'Root device hint "%(name)s" is not an integer '
                        'value. Current value: %(expression)s' %
                        {'name': name, 'expression': expression})

                if integer <= 0:
                    raise ValueError(
                        'Root device hint "%(name)s" should be a positive '
                        'integer. Current value: %(expression)s' %
                        {'name': name, 'expression': expression})

        elif hint_type is bool:
            try:
                root_device[name] = strutils.bool_from_string(
                    expression, strict=True)
            except ValueError:
                raise ValueError(
                    'Root device hint "%(name)s" is not a Boolean value. '
                    'Current value: %(expression)s' %
                    {'name': name, 'expression': expression})

    return _append_operator_to_hints(root_device)


def find_devices_by_hints(devices, root_device_hints):
    """Find all devices that match the root device hints.

    Try to find devices that match the root device hints. In order
    for a device to be matched it needs to satisfy all the given hints.

    :param devices: A list of dictionaries representing the devices
                    containing one or more of the following keys:

        :name: (String) The device name, e.g /dev/sda
        :size: (Integer) Size of the device in *bytes*
        :model: (String) Device model
        :vendor: (String) Device vendor name
        :serial: (String) Device serial number
        :wwn: (String) Unique storage identifier
        :wwn_with_extension: (String): Unique storage identifier with
                             the vendor extension appended
        :wwn_vendor_extension: (String): United vendor storage identifier
        :rotational: (Boolean) Whether it's a rotational device or
                     not. Useful to distinguish HDDs (rotational) and SSDs
                     (not rotational).
        :hctl: (String): The SCSI address: Host, channel, target and lun.
                         For example: '1:0:0:0'.
        :by_path: (String): The alternative device name,
                  e.g. /dev/disk/by-path/pci-0000:00

    :param root_device_hints: A dictionary with the root device hints.
    :raises: ValueError, if some information is invalid.
    :returns: A generator with all matching devices as dictionaries.
    """
    LOG.debug('Trying to find devices from "%(devs)s" that match the '
              'device hints "%(hints)s"',
              {'devs': ', '.join([d.get('name') for d in devices]),
               'hints': root_device_hints})
    parsed_hints = parse_root_device_hints(root_device_hints)
    for dev in devices:
        device_name = dev.get('name')

        for hint in parsed_hints:
            hint_type = VALID_ROOT_DEVICE_HINTS[hint]
            device_value = dev.get(hint)
            hint_value = parsed_hints[hint]

            if hint_type is str:
                try:
                    device_value = _normalize_hint_expression(device_value,
                                                              hint)
                except ValueError:
                    LOG.warning(
                        'The attribute "%(attr)s" of the device "%(dev)s" '
                        'has an empty value. Skipping device.',
                        {'attr': hint, 'dev': device_name})
                    break

            if hint == 'size':
                # Since we don't support units yet we expect the size
                # in GiB for now
                device_value = device_value / units.Gi

            LOG.debug('Trying to match the device hint "%(hint)s" '
                      'with a value of "%(hint_value)s" against the same '
                      'device\'s (%(dev)s) attribute with a value of '
                      '"%(dev_value)s"', {'hint': hint, 'dev': device_name,
                                          'hint_value': hint_value,
                                          'dev_value': device_value})

            # NOTE(lucasagomes): Boolean hints are not supported by
            # specs_matcher.match(), so we need to do the comparison
            # ourselves
            if hint_type is bool:
                try:
                    device_value = strutils.bool_from_string(device_value,
                                                             strict=True)
                except ValueError:
                    LOG.warning('The attribute "%(attr)s" (with value '
                                '"%(value)s") of device "%(dev)s" is not '
                                'a valid Boolean. Skipping device.',
                                {'attr': hint, 'value': device_value,
                                 'dev': device_name})
                    break
                if device_value == hint_value:
                    continue

            elif specs_matcher.match(device_value, hint_value):
                continue

            LOG.debug('The attribute "%(attr)s" (with value "%(value)s") '
                      'of device "%(dev)s" does not match the hint %(hint)s',
                      {'attr': hint, 'value': device_value,
                       'dev': device_name, 'hint': hint_value})
            break
        else:
            yield dev


def match_root_device_hints(devices, root_device_hints):
    """Try to find a device that matches the root device hints.

    Try to find a device that matches the root device hints. In order
    for a device to be matched it needs to satisfy all the given hints.

    :param devices: A list of dictionaries representing the devices
                    containing one or more of the following keys:

        :name: (String) The device name, e.g /dev/sda
        :size: (Integer) Size of the device in *bytes*
        :model: (String) Device model
        :vendor: (String) Device vendor name
        :serial: (String) Device serial number
        :wwn: (String) Unique storage identifier
        :wwn_with_extension: (String): Unique storage identifier with
                             the vendor extension appended
        :wwn_vendor_extension: (String): United vendor storage identifier
        :rotational: (Boolean) Whether it's a rotational device or
                     not. Useful to distinguish HDDs (rotational) and SSDs
                     (not rotational).
        :hctl: (String): The SCSI address: Host, channel, target and lun.
                         For example: '1:0:0:0'.
        :by_path: (String): The alternative device name,
                  e.g. /dev/disk/by-path/pci-0000:00

    :param root_device_hints: A dictionary with the root device hints.
    :raises: ValueError, if some information is invalid.
    :returns: The first device to match all the hints or None.
    """
    try:
        dev = next(find_devices_by_hints(devices, root_device_hints))
    except StopIteration:
        LOG.warning('No device found that matches the root device hints %s',
                    root_device_hints)
    else:
        LOG.info('Root device found! The device "%s" matches the root '
                 'device hints %s', dev, root_device_hints)
        return dev
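For reviewers unfamiliar with the hint grammar, a small usage sketch of the module above; the device data is invented for illustration:

    # Devices as a hardware manager would serialize them (sizes in bytes).
    devices = [
        {'name': '/dev/sda', 'size': 500107862016, 'rotational': True},
        {'name': '/dev/sdb', 'size': 256060514304, 'rotational': False},
    ]

    # 'size' is compared in GiB and may use the specs_matcher operator
    # grammar; 'rotational' is parsed as a Boolean.
    hints = {'size': '>= 300', 'rotational': True}

    device = match_root_device_hints(devices, hints)
    # -> the /dev/sda entry; None if nothing satisfies every hint.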
ironic_python_agent/disk_partitioner.py

@@ -22,10 +22,11 @@ https://opendev.org/openstack/ironic-lib/commit/42fa5d63861ba0f04b9a4f67212173d7
 
 import logging
 
-from ironic_lib import exception
-from ironic_lib import utils
 from oslo_config import cfg
 
+from ironic_python_agent import errors
+from ironic_python_agent import utils
+
 CONF = cfg.CONF
 
 LOG = logging.getLogger(__name__)
@@ -116,7 +117,7 @@ class DiskPartitioner(object):
         try:
             from ironic_python_agent import disk_utils  # circular dependency
             disk_utils.wait_for_disk_to_become_available(self._device)
-        except exception.IronicException as e:
-            raise exception.InstanceDeployFailure(
+        except errors.RESTError as e:
+            raise errors.DeploymentError(
                 ('Disk partitioning failed on device %(device)s. '
                  'Error: %(error)s') % {'device': self._device, 'error': e})
ironic_python_agent/disk_utils.py

@@ -26,8 +26,6 @@ import re
 import stat
 import time
 
-from ironic_lib import exception
-from ironic_lib import utils
 from oslo_concurrency import processutils
 from oslo_config import cfg
 from oslo_utils import excutils
@@ -37,6 +35,7 @@ import tenacity
 from ironic_python_agent import disk_partitioner
+from ironic_python_agent import errors
 from ironic_python_agent import qemu_img
 from ironic_python_agent import utils
 
 CONF = cfg.CONF
 
@@ -364,7 +363,7 @@ def is_block_device(dev):
     msg = ("Unable to stat device %(dev)s after attempting to verify "
            "%(attempts)d times.") % {'dev': dev, 'attempts': attempts}
     LOG.error(msg)
-    raise exception.InstanceDeployFailure(msg)
+    raise errors.DeploymentError(msg)
 
 
 def dd(src, dst, conv_flags=None):
@@ -578,14 +577,12 @@ def destroy_disk_metadata(dev, node_uuid):
 
     try:
         wait_for_disk_to_become_available(dev)
-    except exception.IronicException as e:
-        raise exception.InstanceDeployFailure(
-            _('Destroying metadata failed on device %(device)s. '
-              'Error: %(error)s')
-            % {'device': dev, 'error': e})
+    except errors.RESTError as e:
+        raise errors.DeploymentError(
+            f'Destroying metadata failed on device {dev}s. Error: {e}')
 
-    LOG.info("Disk metadata on %(dev)s successfully destroyed for node "
-             "%(node)s", {'dev': dev, 'node': node_uuid})
+    LOG.info(f"Disk metadata on {dev} successfully destroyed for node "
+             f"{node_uuid}")
 
 
 def _fix_gpt_structs(device, node_uuid):
@@ -608,7 +605,7 @@ def _fix_gpt_structs(device, node_uuid):
                'for node %(node)s. Error: %(error)s' %
                {'disk': device, 'node': node_uuid, 'error': e})
         LOG.error(msg)
-        raise exception.InstanceDeployFailure(msg)
+        raise errors.DeploymentError(msg)
 
 
 def fix_gpt_partition(device, node_uuid):
@@ -630,7 +627,7 @@ def fix_gpt_partition(device, node_uuid):
                'for node %(node)s. Error: %(error)s' %
                {'disk': device, 'node': node_uuid, 'error': e})
         LOG.error(msg)
-        raise exception.InstanceDeployFailure(msg)
+        raise errors.DeploymentError(msg)
 
 
 def udev_settle():
@@ -785,13 +782,13 @@ def wait_for_disk_to_become_available(device):
         retry(_wait_for_disk)()
     except tenacity.RetryError:
         if pids[0]:
-            raise exception.IronicException(
+            raise errors.DeviceNotFound(
                 ('Processes with the following PIDs are holding '
                  'device %(device)s: %(pids)s. '
                  'Timed out waiting for completion.')
                 % {'device': device, 'pids': ', '.join(pids[0])})
         else:
-            raise exception.IronicException(
+            raise errors.DeviceNotFound(
                 ('Fuser exited with "%(fuser_err)s" while checking '
                  'locks for device %(device)s. Timed out waiting for '
                  'completion.') % {'device': device, 'fuser_err': stderr[0]})
ironic_python_agent/encoding.py

@@ -15,8 +15,6 @@
 import json
 import uuid
 
-from ironic_lib import exception as lib_exc
-
 
 class Serializable(object):
     """Base class for things that can be serialized."""
@@ -45,14 +43,6 @@ class SerializableComparable(Serializable):
         return self.serialize() != other.serialize()
 
 
-def serialize_lib_exc(exc):
-    """Serialize an ironic-lib exception."""
-    return {'type': exc.__class__.__name__,
-            'code': exc.code,
-            'message': str(exc),
-            'details': ''}
-
-
 class RESTJSONEncoder(json.JSONEncoder):
     """A slightly customized JSON encoder."""
     def encode(self, o):
@@ -78,7 +68,5 @@ class RESTJSONEncoder(json.JSONEncoder):
             return o.serialize()
         elif isinstance(o, uuid.UUID):
             return str(o)
-        elif isinstance(o, lib_exc.IronicException):
-            return serialize_lib_exc(o)
         else:
             return json.JSONEncoder.default(self, o)
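With serialize_lib_exc() gone, everything returned over the REST API serializes through one remaining path: Serializable objects (the agent's own exceptions already go through that path) plus the UUID special case. A self-contained sketch of the remaining encoder logic, with Serializable reduced to a field list for illustration:

    import json
    import uuid


    class Serializable:
        """Sketch of the base class defined earlier in this file."""
        serializable_fields = ()

        def serialize(self):
            # Turn this object into a plain dict of its declared fields.
            return {f: getattr(self, f) for f in self.serializable_fields}


    class RESTJSONEncoder(json.JSONEncoder):
        def default(self, o):
            if isinstance(o, Serializable):
                return o.serialize()
            elif isinstance(o, uuid.UUID):
                return str(o)
            return super().default(o)


    class CommandResult(Serializable):
        serializable_fields = ('id', 'status')

        def __init__(self):
            self.id = uuid.uuid4()
            self.status = 'SUCCEEDED'


    print(json.dumps(CommandResult(), cls=RESTJSONEncoder))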
ironic_python_agent/errors.py

@@ -385,3 +385,35 @@ class InvalidImage(DeploymentError):
 
     def __init__(self, details=None):
         super(InvalidImage, self).__init__(details)
+
+
+class FileSystemNotSupported(RESTError):
+    """Error raised when a file system is not supported."""
+
+    def __init__(self, fs):
+        details = (f"Failed to create a file system. File system {fs} is not "
+                   "supported.")
+        self.message = details
+        super(RESTError, self).__init__(details)
+
+
+class InvalidMetricConfig(RESTError):
+    """Error raised when a metric config is invalid."""
+
+    message = "Invalid value for metrics config option."
+
+
+class MetricsNotSupported(RESTError):
+    """Error raised when a metrics action is not supported."""
+
+    message = ("Metrics action is not supported. You may need to "
+               "adjust the [metrics] section in ironic.conf.")
+
+
+class ServiceLookupFailure(RESTError):
+    """Error raised when an mdns service lookup fails."""
+
+    def __init__(self, service="unknown"):
+        details = f"Cannot find {service} service through multicast."
+        self.message = details
+        super(RESTError, self).__init__(details)
ironic_python_agent/extensions/clean.py

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ironic_lib import exception as il_exc
 from oslo_log import log
 
 from ironic_python_agent import errors
@@ -76,7 +75,7 @@ class CleanExtension(base.BaseAgentExtension):
         try:
             result = hardware.dispatch_to_managers(step['step'], node, ports,
                                                    **kwargs)
-        except (errors.RESTError, il_exc.IronicException):
+        except errors.RESTError:
             LOG.exception('Error performing clean step %s', step['step'])
             raise
         except Exception as e:
ironic_python_agent/extensions/deploy.py

@@ -10,7 +10,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ironic_lib import exception as il_exc
 from oslo_log import log
 
 from ironic_python_agent import errors
@@ -76,7 +75,7 @@ class DeployExtension(base.BaseAgentExtension):
         try:
             result = hardware.dispatch_to_managers(step['step'], node, ports,
                                                    **kwargs)
-        except (errors.RESTError, il_exc.IronicException):
+        except errors.RESTError:
             LOG.exception('Error performing deploy step %s', step['step'])
             raise
         except Exception as e:
ironic_python_agent/extensions/service.py

@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ironic_lib import exception as il_exc
 from oslo_log import log
 
 from ironic_python_agent import errors
@@ -76,7 +75,7 @@ class ServiceExtension(base.BaseAgentExtension):
         try:
             result = hardware.dispatch_to_managers(step['step'], node, ports,
                                                    **kwargs)
-        except (errors.RESTError, il_exc.IronicException):
+        except errors.RESTError:
             LOG.exception('Error performing service step %s', step['step'])
             raise
         except Exception as e:
ironic_python_agent/extensions/standby.py

@@ -19,7 +19,6 @@ import tempfile
 import time
 from urllib import parse as urlparse
 
-from ironic_lib import exception
 from oslo_concurrency import processutils
 from oslo_config import cfg
 from oslo_log import log
@@ -402,7 +401,7 @@ def _write_image(image_info, device, configdrive=None):
               'totaltime': totaltime})
     try:
         disk_utils.fix_gpt_partition(device, node_uuid=None)
-    except exception.InstanceDeployFailure:
+    except errors.DeploymentError:
         # Note: the catch internal to the helper method logs any errors.
         pass
     return uuids
@@ -796,14 +795,14 @@ def _validate_partitioning(device):
             processutils.ProcessExecutionError, OSError) as e:
         msg = ("Unable to find a valid partition table on the disk after "
                f"writing the image. The image may be corrupted. Error: {e}")
-        raise exception.InstanceDeployFailure(msg)
+        raise errors.DeploymentError(msg)
 
     # Check if there is at least one partition in the partition table after
     # deploy
     if not nparts:
         msg = ("No partitions found on the device {} after writing "
                "the image.".format(device))
-        raise exception.InstanceDeployFailure(msg)
+        raise errors.DeploymentError(msg)
 
 
 class StandbyExtension(base.BaseAgentExtension):
@@ -890,7 +889,7 @@ class StandbyExtension(base.BaseAgentExtension):
         # Fix any gpt partition
         try:
             disk_utils.fix_gpt_partition(device, node_uuid=None)
-        except exception.InstanceDeployFailure:
+        except errors.DeploymentError:
             # Note: the catch internal to the helper method logs any errors.
             pass
         # Fix the root partition UUID
ironic_python_agent/hardware.py

@@ -31,7 +31,6 @@ import string
 import time
 from typing import List
 
-from ironic_lib import utils as il_utils
 from oslo_concurrency import processutils
 from oslo_config import cfg
 from oslo_log import log
@@ -42,6 +41,7 @@ import stevedore
 import yaml
 
 from ironic_python_agent import burnin
+from ironic_python_agent import device_hints
 from ironic_python_agent import disk_utils
 from ironic_python_agent import efi_utils
 from ironic_python_agent import encoding
@@ -129,9 +129,9 @@ def _load_ipmi_modules():
     This is required to be called at least once before attempting to use
     ipmitool or related tools.
     """
-    il_utils.try_execute('modprobe', 'ipmi_msghandler')
-    il_utils.try_execute('modprobe', 'ipmi_devintf')
-    il_utils.try_execute('modprobe', 'ipmi_si')
+    utils.try_execute('modprobe', 'ipmi_msghandler')
+    utils.try_execute('modprobe', 'ipmi_devintf')
+    utils.try_execute('modprobe', 'ipmi_si')
 
 
 def _load_multipath_modules():
@@ -150,18 +150,18 @@ def _load_multipath_modules():
         # which is not *really* required.. at least *shouldn't* be.
         # WARNING(TheJulia): This command explicitly replaces local
         # configuration.
-        il_utils.try_execute('/usr/sbin/mpathconf', '--enable',
-                             '--find_multipaths', 'yes',
-                             '--with_module', 'y',
-                             '--with_multipathd', 'y')
+        utils.try_execute('/usr/sbin/mpathconf', '--enable',
+                          '--find_multipaths', 'yes',
+                          '--with_module', 'y',
+                          '--with_multipathd', 'y')
     else:
        # Ensure modules are loaded. Configuration is not required
        # and implied based upon compiled in defaults.
        # NOTE(TheJulia): Debian/Ubuntu specifically just document
        # using `multipath -t` output to start a new configuration
        # file, if needed.
-        il_utils.try_execute('modprobe', 'dm_multipath')
-        il_utils.try_execute('modprobe', 'multipath')
+        utils.try_execute('modprobe', 'dm_multipath')
+        utils.try_execute('modprobe', 'multipath')
 
 
 def _check_for_iscsi():
@@ -173,13 +173,13 @@ def _check_for_iscsi():
     - If no connection is detected we simply return.
     """
    try:
-        il_utils.execute('iscsistart', '-f')
+        utils.execute('iscsistart', '-f')
    except (processutils.ProcessExecutionError, EnvironmentError) as e:
        LOG.debug("No iscsi connection detected. Skipping iscsi. "
                  "Error: %s", e)
        return
    try:
-        il_utils.execute('iscsistart', '-b')
+        utils.execute('iscsistart', '-b')
    except processutils.ProcessExecutionError as e:
        LOG.warning("Something went wrong executing 'iscsistart -b' "
                    "Error: %s", e)
@@ -192,8 +192,8 @@ def _get_md_uuid(raid_device):
     :returns: A string containing the UUID of an md device.
     """
     try:
-        out, _ = il_utils.execute('mdadm', '--detail', raid_device,
-                                  use_standard_locale=True)
+        out, _ = utils.execute('mdadm', '--detail', raid_device,
+                               use_standard_locale=True)
     except processutils.ProcessExecutionError as e:
         LOG.warning('Could not get the details of %(dev)s: %(err)s',
                     {'dev': raid_device, 'err': e})
@@ -224,12 +224,12 @@ def _enable_multipath():
         # the multipathd version in case multipathd is already running.
         # The safest way to start multipathd is to expect OS error in addition
         # to the execution error and handle both as inconsequential.
-        il_utils.try_execute('multipathd')
+        utils.try_execute('multipathd')
         # This is mainly to get the system to actually do the needful and
         # identify/enumerate paths by combining what it can detect and what
         # it already knows. This may be useful, and in theory this should be
         # logged in the IPA log should it be needed.
-        il_utils.execute('multipath', '-ll')
+        utils.execute('multipath', '-ll')
     except FileNotFoundError as e:
         LOG.warning('Attempted to determine if multipath tools were present. '
                     'Not detected. Error recorded: %s', e)
@@ -251,7 +251,7 @@ def _get_multipath_parent_device(device):
         # Explicitly run the check as regardless of if the device is mpath or
         # not, multipath tools when using list always exits with a return
         # code of 0.
-        il_utils.execute('multipath', '-c', check_device)
+        utils.execute('multipath', '-c', check_device)
         # path check with return an exit code of 1 if you send it a multipath
         # device mapper device, like dm-0.
         # NOTE(TheJulia): -ll is supposed to load from all available
@@ -259,7 +259,7 @@ def _get_multipath_parent_device(device):
         # that. That being said, it has been about a decade since I was
         # running multipath tools on SAN connected gear, so my memory is
         # definitely fuzzy.
-        out, _ = il_utils.execute('multipath', '-ll', check_device)
+        out, _ = utils.execute('multipath', '-ll', check_device)
     except processutils.ProcessExecutionError as e:
         # FileNotFoundError if the utility does not exist.
         # -1 return code if the device is not valid.
@@ -318,8 +318,8 @@ def get_component_devices(raid_device):
                                            ignore_raid=True))
     for bdev in block_devices:
         try:
-            out, _ = il_utils.execute('mdadm', '--examine', bdev.name,
-                                      use_standard_locale=True)
+            out, _ = utils.execute('mdadm', '--examine', bdev.name,
+                                   use_standard_locale=True)
         except processutils.ProcessExecutionError as e:
             if "No md superblock detected" in str(e):
                 # actually not a component device
@@ -366,8 +366,8 @@ def get_holder_disks(raid_device):
         return []
 
     try:
-        out, _ = il_utils.execute('mdadm', '--detail', raid_device,
-                                  use_standard_locale=True)
+        out, _ = utils.execute('mdadm', '--detail', raid_device,
+                               use_standard_locale=True)
     except processutils.ProcessExecutionError as e:
         LOG.warning('Could not get holder disks of %(dev)s: %(err)s',
                     {'dev': raid_device, 'err': e})
@@ -411,7 +411,7 @@ def is_md_device(raid_device):
     :returns: True if the device is an md device, False otherwise.
     """
     try:
-        il_utils.execute('mdadm', '--detail', raid_device)
+        utils.execute('mdadm', '--detail', raid_device)
         LOG.debug("%s is an md device", raid_device)
         return True
     except FileNotFoundError:
@@ -434,9 +434,9 @@ def md_restart(raid_device):
     try:
         LOG.debug('Restarting software RAID device %s', raid_device)
         component_devices = get_component_devices(raid_device)
-        il_utils.execute('mdadm', '--stop', raid_device)
-        il_utils.execute('mdadm', '--assemble', raid_device,
-                         *component_devices)
+        utils.execute('mdadm', '--stop', raid_device)
+        utils.execute('mdadm', '--assemble', raid_device,
+                      *component_devices)
     except processutils.ProcessExecutionError as e:
         error_msg = ('Could not restart md device %(dev)s: %(err)s' %
                      {'dev': raid_device, 'err': e})
@@ -451,8 +451,8 @@ def md_get_raid_devices():
         devices
     """
     # Note(Boushra): mdadm output is similar to lsblk, but not
-    # identical; do not use il_utils.parse_device_tags
-    report = il_utils.execute('mdadm', '--examine', '--scan')[0]
+    # identical; do not use utils.parse_device_tags
+    report = utils.execute('mdadm', '--examine', '--scan')[0]
     lines = report.splitlines()
     result = {}
     for line in lines:
@@ -470,7 +470,7 @@ def _md_scan_and_assemble():
     This call does not fail if no md devices are present.
     """
     try:
-        il_utils.execute('mdadm', '--assemble', '--scan', '--verbose')
+        utils.execute('mdadm', '--assemble', '--scan', '--verbose')
     except FileNotFoundError:
         LOG.warning('mdadm has not been found, RAID devices will not be '
                     'supported')
@@ -548,9 +548,9 @@ def list_all_block_devices(block_type='disk',
                 "Cause: %(error)s", {'path': disk_by_path_dir, 'error': e})
 
     columns = utils.LSBLK_COLUMNS
-    report = il_utils.execute('lsblk', '-bia', '--json',
-                              '-o{}'.format(','.join(columns)),
-                              check_exit_code=[0])[0]
+    report = utils.execute('lsblk', '-bia', '--json',
+                           '-o{}'.format(','.join(columns)),
+                           check_exit_code=[0])[0]
 
     try:
         report_json = json.loads(report)
@@ -1380,7 +1380,7 @@ class GenericHardwareManager(HardwareManager):
         if self._lshw_cache:
             return self._lshw_cache
 
-        out, _e = il_utils.execute('lshw', '-quiet', '-json', log_stdout=False)
+        out, _e = utils.execute('lshw', '-quiet', '-json', log_stdout=False)
         out = json.loads(out)
         # Depending on lshw version, output might be a list, starting with
         # https://github.com/lyonel/lshw/commit/135a853c60582b14c5b67e5cd988a8062d9896f4  # noqa
@@ -1498,7 +1498,7 @@ class GenericHardwareManager(HardwareManager):
             return
 
         try:
-            stdout, _ = il_utils.execute('biosdevname', '-i', interface_name)
+            stdout, _ = utils.execute('biosdevname', '-i', interface_name)
             return stdout.rstrip('\n')
         except OSError:
             if not WARN_BIOSDEVNAME_NOT_FOUND:
@@ -1614,7 +1614,7 @@ class GenericHardwareManager(HardwareManager):
         return cpus
 
     def get_cpus(self):
-        lines = il_utils.execute('lscpu')[0]
+        lines = utils.execute('lscpu')[0]
         cpu_info = self.create_cpu_info_dict(lines)
 
         # NOTE(adamcarthur) Kept this assuming it was added as a fallback
@@ -1708,7 +1708,8 @@ class GenericHardwareManager(HardwareManager):
         for hint in skip_list_hints:
             if 'volume_name' in hint:
                 continue
-            found_devs = il_utils.find_devices_by_hints(serialized_devs, hint)
+            found_devs = device_hints.find_devices_by_hints(serialized_devs,
+                                                            hint)
             excluded_devs = {dev['name'] for dev in found_devs}
             skipped_devices = excluded_devs.difference(skip_list)
             skip_list = skip_list.union(excluded_devs)
@@ -1769,8 +1770,8 @@ class GenericHardwareManager(HardwareManager):
                 tmp_ser_dev['serial'] = serial
                 serialized_devs.append(tmp_ser_dev)
         try:
-            device = il_utils.match_root_device_hints(serialized_devs,
-                                                      root_device_hints)
+            device = device_hints.match_root_device_hints(
+                serialized_devs, root_device_hints)
         except ValueError as e:
             # NOTE(lucasagomes): Just playing on the safe side
             # here, this exception should never be raised because
@@ -2097,7 +2098,7 @@ class GenericHardwareManager(HardwareManager):
         args += ('--verbose', '--iterations', str(npasses), block_device.name)
 
         try:
-            il_utils.execute(*args)
+            utils.execute(*args)
         except (processutils.ProcessExecutionError, OSError) as e:
            LOG.error("Erasing block device %(dev)s failed with error %(err)s",
                      {'dev': block_device.name, 'err': e})
@@ -2130,8 +2131,8 @@ class GenericHardwareManager(HardwareManager):
         try:
             # Don't use the '--nodeps' of lsblk to also catch the
             # parent device of partitions which are RAID members.
-            out, _ = il_utils.execute('lsblk', '--fs', '--noheadings',
-                                      block_device.name)
+            out, _ = utils.execute('lsblk', '--fs', '--noheadings',
+                                   block_device.name)
         except processutils.ProcessExecutionError as e:
             LOG.warning("Could not determine if %(name)s is a RAID member: "
                         "%(err)s",
@@ -2172,7 +2173,7 @@ class GenericHardwareManager(HardwareManager):
         return False
 
     def _get_ata_security_lines(self, block_device):
-        output = il_utils.execute('hdparm', '-I', block_device.name)[0]
+        output = utils.execute('hdparm', '-I', block_device.name)[0]
 
         if '\nSecurity: ' not in output:
             return []
@@ -2205,9 +2206,9 @@ class GenericHardwareManager(HardwareManager):
         # instead of `scsi` or `sat` as smartctl will not be able to read
         # a bridged device that it doesn't understand, and accordingly
         # return an error code.
-        output = il_utils.execute('smartctl', '-d', 'ata',
-                                  block_device.name, '-g', 'security',
-                                  check_exit_code=[0, 127])[0]
+        output = utils.execute('smartctl', '-d', 'ata',
+                               block_device.name, '-g', 'security',
+                               check_exit_code=[0, 127])[0]
         if 'Unavailable' in output:
             # Smartctl is reporting it is unavailable, lets return false.
             LOG.debug('Smartctl has reported that security is '
@@ -2241,9 +2242,9 @@ class GenericHardwareManager(HardwareManager):
             if 'not locked' in security_lines:
                 break
             try:
-                il_utils.execute('hdparm', '--user-master', 'u',
-                                 '--security-unlock', password,
-                                 block_device.name)
+                utils.execute('hdparm', '--user-master', 'u',
+                              '--security-unlock', password,
+                              block_device.name)
             except processutils.ProcessExecutionError as e:
                 LOG.info('Security unlock failed for device '
                          '%(name)s using password "%(password)s": %(err)s',
@@ -2289,9 +2290,9 @@ class GenericHardwareManager(HardwareManager):
             # SEC1. Try to transition to SEC5 by setting empty user
             # password.
             try:
-                il_utils.execute('hdparm', '--user-master', 'u',
-                                 '--security-set-pass', 'NULL',
-                                 block_device.name)
+                utils.execute('hdparm', '--user-master', 'u',
+                              '--security-set-pass', 'NULL',
+                              block_device.name)
             except processutils.ProcessExecutionError as e:
                 error_msg = ('Security password set failed for device '
                              '{name}: {err}'
@@ -2304,8 +2305,8 @@ class GenericHardwareManager(HardwareManager):
             erase_option += '-enhanced'
 
         try:
-            il_utils.execute('hdparm', '--user-master', 'u', erase_option,
-                             'NULL', block_device.name)
+            utils.execute('hdparm', '--user-master', 'u', erase_option,
+                          'NULL', block_device.name)
         except processutils.ProcessExecutionError as e:
             # NOTE(TheJulia): Attempt unlock to allow fallback to shred
             # to occur, otherwise shred will fail as well, as the security
@@ -2350,8 +2351,8 @@ class GenericHardwareManager(HardwareManager):
         try:
             LOG.debug("Attempting to fetch NVMe capabilities for device %s",
                       block_device.name)
-            nvme_info, _e = il_utils.execute('nvme', 'id-ctrl',
-                                             block_device.name, '-o', 'json')
+            nvme_info, _e = utils.execute('nvme', 'id-ctrl',
+                                          block_device.name, '-o', 'json')
             nvme_info = json.loads(nvme_info)
 
         except processutils.ProcessExecutionError as e:
@@ -2393,8 +2394,8 @@ class GenericHardwareManager(HardwareManager):
         try:
             LOG.debug("Attempting to nvme-format %s using secure format mode "
                       "(ses) %s", block_device.name, format_mode)
-            il_utils.execute('nvme', 'format', block_device.name, '-s',
-                             format_mode, '-f')
+            utils.execute('nvme', 'format', block_device.name, '-s',
+                          format_mode, '-f')
             LOG.info("nvme-cli format for device %s (ses= %s ) completed "
                      "successfully.", block_device.name, format_mode)
             return True
@@ -2418,7 +2419,7 @@ class GenericHardwareManager(HardwareManager):
         # different types of communication media and protocols and
         # effectively used
         for channel in range(1, 12):
-            out, e = il_utils.execute(
+            out, e = utils.execute(
                 "ipmitool lan print {} | awk '/IP Address[ \\t]*:/"
                 " {{print $4}}'".format(channel), shell=True)
             if e.startswith("Invalid channel"):
@@ -2459,7 +2460,7 @@ class GenericHardwareManager(HardwareManager):
         # different types of communication media and protocols and
         # effectively used
         for channel in range(1, 12):
-            out, e = il_utils.execute(
+            out, e = utils.execute(
                 "ipmitool lan print {} | awk '/(IP|MAC) Address[ \\t]*:/"
                 " {{print $4}}'".format(channel), shell=True)
             if e.startswith("Invalid channel"):
@@ -2474,7 +2475,7 @@ class GenericHardwareManager(HardwareManager):
 
         if ip == "0.0.0.0":
             # Check if we have IPv6 address configured
-            out, e = il_utils.execute(
+            out, e = utils.execute(
                 "ipmitool lan6 print {} | awk '/^IPv6"
                 " (Dynamic|Static) Address [0-9]+:/"
                 " {{in_section=1; next}} /^IPv6 / {{in_section=0}}"
@@ -2536,7 +2537,7 @@ class GenericHardwareManager(HardwareManager):
             cmd = "ipmitool lan6 print {} {}_addr".format(
                 channel, 'dynamic' if dynamic else 'static')
             try:
-                out, exc = il_utils.execute(cmd, shell=True)
+                out, exc = utils.execute(cmd, shell=True)
             except processutils.ProcessExecutionError:
                 return
 
@@ -2566,7 +2567,7 @@ class GenericHardwareManager(HardwareManager):
         # different types of communication media and protocols and
         # effectively used
         for channel in range(1, 12):
-            addr_mode, e = il_utils.execute(
+            addr_mode, e = utils.execute(
                 r"ipmitool lan6 print {} enables | "
                 r"awk '/IPv6\/IPv4 Addressing Enables[ \t]*:/"
                 r"{{print $NF}}'".format(channel), shell=True)
@@ -2860,8 +2861,8 @@ class GenericHardwareManager(HardwareManager):
         for raid_device in raid_devices:
             device = raid_device.name
             try:
-                il_utils.execute('mdadm', '--examine',
-                                 device, use_standard_locale=True)
+                utils.execute('mdadm', '--examine',
+                              device, use_standard_locale=True)
             except processutils.ProcessExecutionError as e:
                 if "No md superblock detected" in str(e):
                     continue
@@ -2988,9 +2989,9 @@ class GenericHardwareManager(HardwareManager):
                           {'dev': device, 'str': start_str,
                            'end': end_str})
 
-                il_utils.execute('parted', device, '-s', '-a',
-                                 'optimal', '--', 'mkpart', 'primary',
-                                 start_str, end_str)
+                utils.execute('parted', device, '-s', '-a',
+                              'optimal', '--', 'mkpart', 'primary',
+                              start_str, end_str)
 
             except processutils.ProcessExecutionError as e:
                 msg = "Failed to create partitions on {}: {}".format(
@@ -3025,8 +3026,8 @@ class GenericHardwareManager(HardwareManager):
         """
 
         def _scan_raids():
-            il_utils.execute('mdadm', '--assemble', '--scan',
-                             check_exit_code=False)
+            utils.execute('mdadm', '--assemble', '--scan',
+                          check_exit_code=False)
             raid_devices = list_all_block_devices(block_type='raid',
                                                   ignore_raid=False,
                                                   ignore_empty=False)
@@ -3098,12 +3099,12 @@ class GenericHardwareManager(HardwareManager):
         if not do_not_delete:
             # Remove md devices.
             try:
-                il_utils.execute('wipefs', '-af', raid_device.name)
+                utils.execute('wipefs', '-af', raid_device.name)
             except processutils.ProcessExecutionError as e:
                 LOG.warning('Failed to wipefs %(device)s: %(err)s',
                             {'device': raid_device.name, 'err': e})
             try:
-                il_utils.execute('mdadm', '--stop', raid_device.name)
+                utils.execute('mdadm', '--stop', raid_device.name)
             except processutils.ProcessExecutionError as e:
                 LOG.warning('Failed to stop %(device)s: %(err)s',
                             {'device': raid_device.name, 'err': e})
@@ -3111,9 +3112,9 @@ class GenericHardwareManager(HardwareManager):
         # Remove md metadata from component devices.
         for component_device in component_devices:
             try:
-                il_utils.execute('mdadm', '--examine',
-                                 component_device,
-                                 use_standard_locale=True)
+                utils.execute('mdadm', '--examine',
+                              component_device,
+                              use_standard_locale=True)
             except processutils.ProcessExecutionError as e:
                 if "No md superblock detected" in str(e):
                     # actually not a component device
@@ -3125,8 +3126,8 @@ class GenericHardwareManager(HardwareManager):
 
             LOG.debug('Deleting md superblock on %s', component_device)
             try:
-                il_utils.execute('mdadm', '--zero-superblock',
-                                 component_device)
+                utils.execute('mdadm', '--zero-superblock',
+                              component_device)
             except processutils.ProcessExecutionError as e:
                 LOG.warning('Failed to remove superblock from'
                             '%(device)s: %(err)s',
@@ -3184,8 +3185,8 @@ class GenericHardwareManager(HardwareManager):
             if blk.name in do_not_delete_disks:
                 continue
             try:
-                il_utils.execute('mdadm', '--examine', blk.name,
-                                 use_standard_locale=True)
+                utils.execute('mdadm', '--examine', blk.name,
+                              use_standard_locale=True)
             except processutils.ProcessExecutionError as e:
                 if "No md superblock detected" in str(e):
                     # actually not a component device
@@ -3195,7 +3196,7 @@ class GenericHardwareManager(HardwareManager):
                             {'name': blk.name, 'err': e})
                 continue
             try:
-                il_utils.execute('mdadm', '--zero-superblock', blk.name)
+                utils.execute('mdadm', '--zero-superblock', blk.name)
             except processutils.ProcessExecutionError as e:
                 LOG.warning('Failed to remove superblock from'
                             '%(device)s: %(err)s',
@@ -3214,14 +3215,14 @@ class GenericHardwareManager(HardwareManager):
                             '%(parts)s', {'dev': holder_disk,
                                           'parts': del_list})
                 for part in del_list:
-                    il_utils.execute('parted', holder_disk, 'rm', part)
+                    utils.execute('parted', holder_disk, 'rm', part)
             else:
                 LOG.warning('Holder disk %(dev)s contains only logical '
                             'disk(s) on the skip list', holder_disk)
                 continue
             LOG.info('Removing partitions on holder disk %s', holder_disk)
             try:
-                il_utils.execute('wipefs', '-af', holder_disk)
+                utils.execute('wipefs', '-af', holder_disk)
             except processutils.ProcessExecutionError as e:
                 LOG.warning('Failed to remove partitions on %s: %s',
                             holder_disk, e)
@@ -3412,7 +3413,7 @@ class GenericHardwareManager(HardwareManager):
     def _collect_udev(io_dict):
         """Collect device properties from udev."""
         try:
-            out, _e = il_utils.execute('lsblk', '-no', 'KNAME')
+            out, _e = utils.execute('lsblk', '-no', 'KNAME')
         except processutils.ProcessExecutionError as exc:
             LOG.warning('Could not list block devices: %s', exc)
             return
@@ -3772,9 +3773,9 @@ def safety_check_block_device(node, device):
     if not di_info.get('wipe_special_filesystems', True):
         return
     lsblk_ids = ['UUID', 'PTUUID', 'PARTTYPE', 'PARTUUID']
-    report = il_utils.execute('lsblk', '-bia', '--json',
-                              '-o{}'.format(','.join(lsblk_ids)),
-                              device, check_exit_code=[0])[0]
+    report = utils.execute('lsblk', '-bia', '--json',
+                           '-o{}'.format(','.join(lsblk_ids)),
+                           device, check_exit_code=[0])[0]
 
     try:
         report_json = json.loads(report)
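Nearly all of the hardware.py churn above is the mechanical il_utils-to-utils rename, which presumes IPA's utils module now provides the helpers ironic-lib did. A sketch of the try_execute semantics these call sites rely on (assuming it wraps processutils, as the surrounding imports suggest; the exact logging is illustrative, not the committed implementation):

    from oslo_concurrency import processutils
    from oslo_log import log

    LOG = log.getLogger(__name__)


    def try_execute(*cmd, **kwargs):
        """Run a command, returning None instead of raising on failure.

        Call sites such as _load_ipmi_modules() treat a missing kernel
        module or binary as non-fatal, which is why failures are logged
        and swallowed rather than propagated.
        """
        try:
            return processutils.execute(*cmd, **kwargs)
        except (processutils.ProcessExecutionError, OSError) as e:
            LOG.debug('Command failed: %s', e)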
ironic_python_agent/hardware_managers/nvidia/

@@ -20,12 +20,12 @@ from urllib import error as urlError
 from urllib.parse import urlparse
 from urllib import request
 
-from ironic_lib.exception import IronicException
 from oslo_concurrency import processutils
 from oslo_config import cfg
 from oslo_log import log
 from oslo_utils import fileutils
 
+from ironic_python_agent.errors import RESTError
 from ironic_python_agent import utils
 
 
@@ -113,45 +113,45 @@ def check_prereq():
         raise e
 
 
-class InvalidFirmwareImageConfig(IronicException):
+class InvalidFirmwareImageConfig(RESTError):
     _msg_fmt = 'Invalid firmware image config: %(error_msg)s'
 
 
-class InvalidFirmwareSettingsConfig(IronicException):
+class InvalidFirmwareSettingsConfig(RESTError):
     _msg_fmt = 'Invalid firmware settings config: %(error_msg)s'
 
 
-class MismatchChecksumError(IronicException):
+class MismatchChecksumError(RESTError):
     _msg_fmt = 'Mismatch Checksum for the firmware image: %(error_msg)s'
 
 
-class MismatchComponentFlavor(IronicException):
+class MismatchComponentFlavor(RESTError):
     _msg_fmt = 'Mismatch Component Flavor: %(error_msg)s'
 
 
-class MismatchFWVersion(IronicException):
+class MismatchFWVersion(RESTError):
     _msg_fmt = 'Mismatch Firmware version: %(error_msg)s'
 
 
-class DuplicateComponentFlavor(IronicException):
+class DuplicateComponentFlavor(RESTError):
     _msg_fmt = ('Duplicate Component Flavor for the firmware image: '
                 '%(error_msg)s')
 
 
-class DuplicateDeviceID(IronicException):
+class DuplicateDeviceID(RESTError):
     _msg_fmt = ('Duplicate Device ID for firmware settings: '
                 '%(error_msg)s')
 
 
-class UnSupportedConfigByMstflintPackage(IronicException):
+class UnSupportedConfigByMstflintPackage(RESTError):
     _msg_fmt = 'Unsupported config by mstflint package: %(error_msg)s'
 
 
-class UnSupportedConfigByFW(IronicException):
+class UnSupportedConfigByFW(RESTError):
     _msg_fmt = 'Unsupported config by Firmware: %(error_msg)s'
 
 
-class InvalidURLScheme(IronicException):
+class InvalidURLScheme(RESTError):
     _msg_fmt = 'Invalid URL Scheme: %(error_msg)s'
 
 
@@ -453,7 +453,7 @@ class NvidiaNicFirmwareBinary(object):
             err = 'Firmware URL scheme %s is not supported.' \
                   'The supported firmware URL schemes are' \
                   '(http://, https://, file://)' % url_scheme
-            raise InvalidURLScheme(error_msg=err)
+            raise InvalidURLScheme(details=err)
 
     def _get_info(self):
         """Get firmware information from firmware binary image
@@ -488,7 +488,7 @@ class NvidiaNicFirmwareBinary(object):
             err = 'The provided psid %s does not match the image psid %s' % \
                   (self.psid, image_psid)
             LOG.error(err)
-            raise MismatchComponentFlavor(error_msg=err)
+            raise MismatchComponentFlavor(details=err)
 
     def _validate_image_firmware_version(self):
         """Validate that the provided firmware version same as the version
@@ -502,7 +502,7 @@ class NvidiaNicFirmwareBinary(object):
             err = 'The provided firmware version %s does not match ' \
                   'image firmware version %s' % (self.version, image_version)
             LOG.error(err)
-            raise MismatchFWVersion(error_msg=err)
+            raise MismatchFWVersion(details=err)
 
     def _validate_image_checksum(self):
         """Validate the provided checksum with the calculated one of the
@@ -516,7 +516,7 @@ class NvidiaNicFirmwareBinary(object):
             err = 'Mismatch provided checksum %s for image %s' % (
                 self.checksum, self.url)
             LOG.error(err)
-            raise MismatchChecksumError(error_msg=err)
+            raise MismatchChecksumError(details=err)
 
 
 class NvidiaFirmwareImages(object):
@@ -545,7 +545,7 @@ class NvidiaFirmwareImages(object):
                   'url, checksum, checksumType, componentFlavor, ' \
                   'version' % image
             LOG.error(err)
-            raise InvalidFirmwareImageConfig(error_msg=err)
+            raise InvalidFirmwareImageConfig(details=err)
 
     def filter_images(self, psids_list):
         """Filter firmware images according to the system nics PSIDs,
@@ -564,7 +564,7 @@ class NvidiaFirmwareImages(object):
                     err = 'Duplicate componentFlavor %s' % \
                         image['componentFlavor']
                     LOG.error(err)
-                    raise DuplicateComponentFlavor(error_msg=err)
+                    raise DuplicateComponentFlavor(details=err)
                 else:
                     self.filtered_images_psid_dict[
                         image.get('componentFlavor')] = image
@@ -727,13 +727,13 @@ class NvidiaNicConfig(object):
                   'please update to the latest mstflint package.' % key
 
             LOG.error(err)
-            raise UnSupportedConfigByMstflintPackage(error_msg=err)
+            raise UnSupportedConfigByMstflintPackage(details=err)
 
         if not self._param_supp_by_fw(key):
             err = 'Configuraiton %s for device %s is not supported with ' \
                   'current fw' % (key, self.nvidia_dev.dev_pci)
             LOG.error(err)
-            raise UnSupportedConfigByFW(error_msg=err)
+            raise UnSupportedConfigByFW(details=err)
 
     def set_config(self):
         """Set device configurations
@@ -826,14 +826,14 @@ class NvidiaNicsConfig(object):
                 err = 'duplicate settings for device ID %s ' % \
                     setting.get('deviceID')
                 LOG.error(err)
-                raise DuplicateDeviceID(error_msg=err)
+                raise DuplicateDeviceID(details=err)
             elif setting.get('deviceID'):
                 LOG.debug('There are no devices with ID %s on the system',
                           setting.get('deviceID'))
             else:
                 err = 'There is no deviceID provided for this settings'
                 LOG.error(err)
-                raise InvalidFirmwareSettingsConfig(error_msg=err)
+                raise InvalidFirmwareSettingsConfig(details=err)
 
     def prepare_nvidia_nic_config(self):
         """Expand the settings map per devices PCI and create a list
@@ -908,7 +908,7 @@ def update_nvidia_nic_firmware_image(images):
     """
     if not type(images) is list:
         err = 'The images must be a list of images, %s' % images
-        raise InvalidFirmwareImageConfig(error_msg=err)
+        raise InvalidFirmwareImageConfig(details=err)
     check_prereq()
     nvidia_fw_images = NvidiaFirmwareImages(images)
     nvidia_fw_images.validate_images_schema()
@@ -926,7 +926,7 @@ def update_nvidia_nic_firmware_settings(settings):
     """
     if not type(settings) is list:
         err = 'The settings must be list of settings, %s' % settings
-        raise InvalidFirmwareSettingsConfig(error_msg=err)
+        raise InvalidFirmwareSettingsConfig(details=err)
     check_prereq()
     nvidia_nics = NvidiaNics()
     nvidia_nics.discover()
ironic_python_agent/inject_files.py

@@ -16,7 +16,6 @@ import base64
 import contextlib
 import os
 
-from ironic_lib import utils as ironic_utils
 from oslo_concurrency import processutils
 from oslo_config import cfg
 from oslo_log import log
@@ -89,7 +88,7 @@ def _inject_one(node, ports, fl, root_dev, http_get):
     with _find_and_mount_path(fl['path'], fl.get('partition'),
                               root_dev) as path:
         if fl.get('deleted'):
-            ironic_utils.unlink_without_raise(path)
+            utils.unlink_without_raise(path)
             return
 
         try:
ironic_python_agent/inspect.py

@@ -15,7 +15,6 @@ import random
 import select
 import threading
 
-from ironic_lib import exception
 from oslo_config import cfg
 from oslo_log import log
 
@@ -90,7 +89,7 @@ class IronicInspection(threading.Thread):
                     interval = min(interval * self.backoff_factor,
                                    self.max_delay)
 
-            except exception.ServiceLookupFailure as e:
+            except errors.ServiceLookupFailure as e:
                 # Likely a mDNS lookup failure. We should
                 # keep retrying.
                 LOG.error('Error looking up introspection '
@ -18,7 +18,6 @@ import json
import os
import time

from ironic_lib import mdns
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
@ -31,6 +30,7 @@ from ironic_python_agent import config
from ironic_python_agent import encoding
from ironic_python_agent import errors
from ironic_python_agent import hardware
from ironic_python_agent import mdns
from ironic_python_agent import utils
211
ironic_python_agent/mdns.py
Normal file
@ -0,0 +1,211 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Multicast DNS implementation for API discovery.

This implementation follows RFC 6763 as clarified by the API SIG guideline
https://review.opendev.org/651222.
"""

import ipaddress
import logging
import time

from oslo_config import cfg
from oslo_config import types as cfg_types
import zeroconf

from ironic_python_agent import errors
from ironic_python_agent import utils


opts = [
cfg.IntOpt('lookup_attempts',
min=1, default=3,
help='Number of attempts to lookup a service.'),
cfg.Opt('params',
# This is required for values that contain commas.
type=cfg_types.Dict(cfg_types.String(quotes=True)),
default={},
help='Additional parameters to pass for the registered '
'service.'),
cfg.ListOpt('interfaces',
help='List of IP addresses of interfaces to use for mDNS. '
'Defaults to all interfaces on the system.'),
]

CONF = cfg.CONF
opt_group = cfg.OptGroup(name='mdns', title='Options for multicast DNS client')
CONF.register_group(opt_group)
CONF.register_opts(opts, opt_group)

LOG = logging.getLogger(__name__)

_MDNS_DOMAIN = '_openstack._tcp.local.'


class Zeroconf(object):
"""Multicast DNS implementation client and server.

Uses threading internally, so there is no start method. It starts
automatically on creation.

.. warning::
The underlying library does not yet support IPv6.
"""

def __init__(self):
"""Initialize and start the mDNS server."""
interfaces = (CONF.mdns.interfaces if CONF.mdns.interfaces
else zeroconf.InterfaceChoice.All)
# If interfaces are set, let zeroconf auto-detect the version
ip_version = None if CONF.mdns.interfaces else zeroconf.IPVersion.All
self._zc = zeroconf.Zeroconf(interfaces=interfaces,
ip_version=ip_version)
self._registered = []

def get_endpoint(self, service_type, skip_loopback=True,
skip_link_local=False):
"""Get an endpoint and its properties from mDNS.

If the requested endpoint is already in the built-in server cache, and
its TTL is not exceeded, the cached value is returned.

:param service_type: OpenStack service type.
:param skip_loopback: Whether to ignore loopback addresses.
:param skip_link_local: Whether to ignore link local V6 addresses.
:returns: tuple (endpoint URL, properties as a dict).
:raises: :exc:`.ServiceLookupFailure` if the service cannot be found.
"""
delay = 0.1
for attempt in range(CONF.mdns.lookup_attempts):
name = '%s.%s' % (service_type, _MDNS_DOMAIN)
info = self._zc.get_service_info(name, name)
if info is not None:
break
elif attempt == CONF.mdns.lookup_attempts - 1:
raise errors.ServiceLookupFailure(service=service_type)
else:
time.sleep(delay)
delay *= 2

all_addr = info.parsed_addresses()

# Try to find the first routable address
fallback = None
for addr in all_addr:
try:
loopback = ipaddress.ip_address(addr).is_loopback
except ValueError:
LOG.debug('Skipping invalid IP address %s', addr)
continue
else:
if loopback and skip_loopback:
LOG.debug('Skipping loopback IP address %s', addr)
continue

if utils.get_route_source(addr, skip_link_local):
address = addr
break
elif fallback is None:
fallback = addr
else:
if fallback is None:
raise errors.ServiceLookupFailure(
f'None of addresses {all_addr} for service '
f'{service_type} are valid')
else:
LOG.warning(f'None of addresses {all_addr} seem routable, '
f'using {fallback}')
address = fallback

properties = {}
for key, value in info.properties.items():
try:
if isinstance(key, bytes):
key = key.decode('utf-8')
except UnicodeError as exc:
raise errors.ServiceLookupFailure(
f'Invalid properties for service {service_type}. Cannot '
f'decode key {key!r}: {exc!r}')
try:
if isinstance(value, bytes):
value = value.decode('utf-8')
except UnicodeError as exc:
LOG.debug('Cannot convert value %(value)r for key %(key)s '
'to string, assuming binary: %(exc)s',
{'key': key, 'value': value, 'exc': exc})

properties[key] = value

path = properties.pop('path', '')
protocol = properties.pop('protocol', None)
if not protocol:
if info.port == 80:
protocol = 'http'
else:
protocol = 'https'

if info.server.endswith('.local.'):
# Local hostname means that the catalog lists an IP address,
# so use it
host = address
if int(ipaddress.ip_address(host).version) == 6:
host = '[%s]' % host
else:
# Otherwise use the provided hostname.
host = info.server.rstrip('.')

return ('{proto}://{host}:{port}{path}'.format(proto=protocol,
host=host,
port=info.port,
path=path),
properties)

def close(self):
"""Shut down mDNS and unregister services.

.. note::
If another server is running for the same services, it will
re-register them immediately.
"""
for info in self._registered:
try:
self._zc.unregister_service(info)
except Exception:
LOG.exception('Could not unregister mDNS service %s', info)
self._zc.close()

def __enter__(self):
return self

def __exit__(self, *args):
self.close()


def get_endpoint(service_type):
"""Get an endpoint and its properties from mDNS.

If the requested endpoint is already in the built-in server cache, and
its TTL is not exceeded, the cached value is returned.

:param service_type: OpenStack service type.
:returns: tuple (endpoint URL, properties as a dict).
:raises: :exc:`.ServiceLookupFailure` if the service cannot be found.
"""
with Zeroconf() as zc:
return zc.get_endpoint(service_type)


def list_opts():
"""Entry point for oslo-config-generator."""
return [('mdns', opts)]
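A minimal sketch of how this vendored module is consumed, built only on the module-level get_endpoint() above (the 'baremetal' service type and the fallback handling are illustrative):

from ironic_python_agent import errors
from ironic_python_agent import mdns

try:
    # Queries baremetal._openstack._tcp.local. and returns the first
    # routable address found, plus the advertised TXT properties.
    api_url, params = mdns.get_endpoint('baremetal')
except errors.ServiceLookupFailure:
    api_url = None  # fall back to static configuration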
@ -18,7 +18,7 @@ import functools
import random
import time

from ironic_python_agent.metrics_lib import metrics_exception as exception
from ironic_python_agent import errors


class Timer(object):
@ -28,7 +28,7 @@ class Timer(object):
context manager, and emits the time as the metric value. It is bound to
this MetricLogger. For example::

from ironic_lib import metrics_utils
from ironic_python_agent.metrics_lib import metrics_utils

METRICS = metrics_utils.get_metrics_logger()

@ -82,7 +82,7 @@ class Counter(object):
context manager is executed. It is bound to this MetricLogger. For
example::

from ironic_lib import metrics_utils
from ironic_python_agent.metrics_lib import metrics_utils

METRICS = metrics_utils.get_metrics_logger()

@ -141,7 +141,7 @@ class Gauge(object):
every time the method is executed. It is bound to this MetricLogger. For
example::

from ironic_lib import metrics_utils
from ironic_python_agent.metrics_lib import metrics_utils

METRICS = metrics_utils.get_metrics_logger()

@ -291,7 +291,7 @@ class MetricLogger(object, metaclass=abc.ABCMeta):

def get_metrics_data(self):
"""Return the metrics collection, if available."""
raise exception.MetricsNotSupported()
raise errors.MetricsNotSupported()


class NoopMetricLogger(MetricLogger):
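The updated docstrings point at the relocated metrics_utils module. A hedged sketch of the decorator pattern they describe (the metric and function names are illustrative):

from ironic_python_agent.metrics_lib import metrics_utils

METRICS = metrics_utils.get_metrics_logger(__name__)

@METRICS.timer('do_work')
def do_work():
    # Emits a timing sample named after the logger prefix plus
    # 'do_work' every time the function runs.
    pass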
@ -1,34 +0,0 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Ironic base exception handling.

Includes decorator for re-raising Ironic-type exceptions.

SHOULD include dedicated exception logging.

"""

from ironic_lib.exception import IronicException


class InvalidMetricConfig(IronicException):
_msg_fmt = "Invalid value for metrics config option: %(reason)s"


class MetricsNotSupported(IronicException):
_msg_fmt = ("Metrics action is not supported. You may need to "
"adjust the [metrics] section in ironic.conf.")
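With this module deleted, both exceptions move into ironic_python_agent.errors. A plausible shape for the replacements, assuming they follow the RESTError base used throughout this change (the exact message strings in errors.py may differ):

from ironic_python_agent import errors

class InvalidMetricConfig(errors.RESTError):
    # Assumed definition; raised for a bad value in the [metrics]
    # config section.
    message = 'Invalid value for metrics config option'

class MetricsNotSupported(errors.RESTError):
    # Assumed definition; raised when the backend cannot perform the
    # requested metrics action.
    message = 'Metrics action is not supported'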
@ -15,9 +15,9 @@

from oslo_config import cfg

from ironic_python_agent import errors
from ironic_python_agent.metrics_lib import metrics
from ironic_python_agent.metrics_lib import metrics_collector
from ironic_python_agent.metrics_lib import metrics_exception as exception
from ironic_python_agent.metrics_lib import metrics_statsd

metrics_opts = [
@ -70,7 +70,7 @@ def get_metrics_logger(prefix='', backend=None, host=None, delimiter='.'):
msg = ("This metric prefix (%s) is of unsupported type. "
"Value should be a string or None"
% str(prefix))
raise exception.InvalidMetricConfig(msg)
raise errors.InvalidMetricConfig(msg)

if CONF.metrics.prepend_host and host:
if CONF.metrics.prepend_host_reverse:
@ -99,4 +99,4 @@ def get_metrics_logger(prefix='', backend=None, host=None, delimiter='.'):
msg = ("The backend is set to an unsupported type: "
"%s. Value should be 'noop' or 'statsd'."
% backend)
raise exception.InvalidMetricConfig(msg)
raise errors.InvalidMetricConfig(msg)
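Only 'noop' and 'statsd' remain valid backends; anything else now raises errors.InvalidMetricConfig. A small usage sketch (the override value is illustrative):

from oslo_config import cfg

from ironic_python_agent.metrics_lib import metrics_utils

# Select the statsd backend; an unsupported value here would raise
# errors.InvalidMetricConfig instead of the old metrics_lib exception.
cfg.CONF.set_override('backend', 'statsd', group='metrics')
METRICS = metrics_utils.get_metrics_logger('ipa')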
@ -26,8 +26,6 @@ import shutil
import stat
import tempfile

from ironic_lib import exception
from ironic_lib import utils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log
@ -39,7 +37,7 @@ import requests
from ironic_python_agent import disk_utils
from ironic_python_agent import errors
from ironic_python_agent import hardware
from ironic_python_agent import utils as ipa_utils
from ironic_python_agent import utils


LOG = log.getLogger()
@ -72,20 +70,20 @@ def get_configdrive(configdrive, node_uuid, tempdir=None):
# Check if the configdrive option is a HTTP URL or the content directly
is_url = _is_http_url(configdrive)
if is_url:
verify, cert = ipa_utils.get_ssl_client_options(CONF)
verify, cert = utils.get_ssl_client_options(CONF)
timeout = CONF.image_download_connection_timeout
# TODO(dtantsur): support proxy parameters from instance_info
try:
resp = requests.get(configdrive, verify=verify, cert=cert,
timeout=timeout)
except requests.exceptions.RequestException as e:
raise exception.InstanceDeployFailure(
raise errors.DeploymentError(
"Can't download the configdrive content for node %(node)s "
"from '%(url)s'. Reason: %(reason)s" %
{'node': node_uuid, 'url': configdrive, 'reason': e})

if resp.status_code >= 400:
raise exception.InstanceDeployFailure(
raise errors.DeploymentError(
"Can't download the configdrive content for node %(node)s "
"from '%(url)s'. Got status code %(code)s, response "
"body %(body)s" %
@ -122,7 +120,7 @@ def get_configdrive(configdrive, node_uuid, tempdir=None):
'cls': type(exc).__name__})
if is_url:
error_msg += ' Downloaded from "%s".' % configdrive
raise exception.InstanceDeployFailure(error_msg)
raise errors.DeploymentError(error_msg)

configdrive_mb = 0
with gzip.GzipFile('configdrive', 'rb', fileobj=data) as gunzipped:
@ -131,7 +129,7 @@ def get_configdrive(configdrive, node_uuid, tempdir=None):
except EnvironmentError as e:
# Delete the created file
utils.unlink_without_raise(configdrive_file.name)
raise exception.InstanceDeployFailure(
raise errors.DeploymentError(
'Encountered error while decompressing and writing '
'config drive for node %(node)s. Error: %(exc)s' %
{'node': node_uuid, 'exc': e})
@ -169,7 +167,7 @@ def get_labelled_partition(device_path, label, node_uuid):
'for node %(node)s. Error: %(error)s' %
{'disk': device_path, 'node': node_uuid, 'error': e})
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
raise errors.DeploymentError(msg)

found_part = None
if output:
@ -178,7 +176,7 @@ def get_labelled_partition(device_path, label, node_uuid):
if found_part:
found_2 = '/dev/%(part)s' % {'part': dev['NAME'].strip()}
found = [found_part, found_2]
raise exception.InstanceDeployFailure(
raise errors.DeploymentError(
'More than one partition with label "%(label)s" '
'exists on device %(device)s for node %(node)s: '
'%(found)s.' %
@ -269,7 +267,7 @@ def work_on_disk(dev, root_mb, swap_mb, ephemeral_mb, ephemeral_format,
root_part = part_dict.get('root')

if not disk_utils.is_block_device(root_part):
raise exception.InstanceDeployFailure(
raise errors.DeploymentError(
"Root device '%s' not found" % root_part)

for part in ('swap', 'ephemeral', 'configdrive',
@ -279,7 +277,7 @@ def work_on_disk(dev, root_mb, swap_mb, ephemeral_mb, ephemeral_format,
"%(node)s.", {'part': part, 'dev': part_device,
'node': node_uuid})
if part_device and not disk_utils.is_block_device(part_device):
raise exception.InstanceDeployFailure(
raise errors.DeploymentError(
"'%(partition)s' device '%(part_device)s' not found" %
{'partition': part, 'part_device': part_device})

@ -370,7 +368,7 @@ def create_config_drive_partition(node_uuid, device, configdrive):

confdrive_mb, confdrive_file = get_configdrive(configdrive, node_uuid)
if confdrive_mb > MAX_CONFIG_DRIVE_SIZE_MB:
raise exception.InstanceDeployFailure(
raise errors.DeploymentError(
'Config drive size exceeds maximum limit of 64MiB. '
'Size of the given config drive is %(size)d MiB for '
'node %(node)s.'
@ -406,13 +404,13 @@ def create_config_drive_partition(node_uuid, device, configdrive):
pp_count, lp_count = disk_utils.count_mbr_partitions(
device)
except ValueError as e:
raise exception.InstanceDeployFailure(
raise errors.DeploymentError(
'Failed to check the number of primary partitions '
'present on %(dev)s for node %(node)s. Error: '
'%(error)s' % {'dev': device, 'node': node_uuid,
'error': e})
if pp_count > 3:
raise exception.InstanceDeployFailure(
raise errors.DeploymentError(
'Config drive cannot be created for node %(node)s. '
'Disk (%(dev)s) uses MBR partitioning and already '
'has %(parts)d primary partitions.'
@ -443,7 +441,7 @@ def create_config_drive_partition(node_uuid, device, configdrive):
for part in disk_utils.list_partitions(device)}
new_part = set(new_parts) - set(cur_parts)
if len(new_part) != 1:
raise exception.InstanceDeployFailure(
raise errors.DeploymentError(
'Disk partitioning failed on device %(device)s. '
'Unable to retrieve config drive partition '
'information.' % {'device': device})
@ -460,7 +458,7 @@ def create_config_drive_partition(node_uuid, device, configdrive):
'disk': device, 'node': node_uuid,
'uuid': part_uuid}
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
raise errors.DeploymentError(msg)

disk_utils.udev_settle()

@ -488,7 +486,7 @@ def create_config_drive_partition(node_uuid, device, configdrive):
"copied onto partition %(part)s",
{'node': node_uuid, 'part': config_drive_part})

except exception.InstanceDeployFailure:
except errors.DeploymentError:
# Since we no longer have a final action on the decorator, we need
# to catch the failure, and still perform the cleanup.
if confdrive_file:
@ -500,7 +498,7 @@ def create_config_drive_partition(node_uuid, device, configdrive):
'for node %(node)s. Error: %(error)s' %
{'disk': device, 'node': node_uuid, 'error': e})
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
raise errors.DeploymentError(msg)
# If the configdrive was requested make sure we delete the file
# after copying the content to the partition

@ -577,7 +575,7 @@ def _try_build_fat32_config_drive(partition, confdrive_file):
'System. Due to the nature of configuration drive, it could '
'have been incorrectly formatted. Operator investigation is '
'required. Error: {}'.format(str(e)))
raise exception.InstanceDeployFailure(msg)
raise errors.DeploymentError(msg)
finally:
utils.execute('umount', conf_drive_temp)
utils.execute('umount', new_drive_temp)
@ -604,7 +602,7 @@ def _is_disk_larger_than_max_size(device, node_uuid):
'Error: %(error)s' %
{'disk': device, 'node': node_uuid, 'error': e})
LOG.error(msg)
raise exception.InstanceDeployFailure(msg)
raise errors.DeploymentError(msg)

disksize_mb = int(disksize_bytes.strip()) // 1024 // 1024

@ -617,7 +615,7 @@ def get_partition(device, uuid):
{'dev': device, 'uuid': uuid})

try:
ipa_utils.rescan_device(device)
utils.rescan_device(device)
lsblk, _ = utils.execute(
'lsblk', '-PbioKNAME,UUID,PARTUUID,TYPE,LABEL', device)
if lsblk:
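Callers that previously caught ironic-lib's InstanceDeployFailure now catch the agent's own DeploymentError, which is a RESTError and therefore serializes without special-casing. A minimal sketch (the URL and node UUID are illustrative):

from ironic_python_agent import errors
from ironic_python_agent import partition_utils

try:
    mb, path = partition_utils.get_configdrive(
        'http://example.com/configdrive.gz', 'fake-node-uuid')
except errors.DeploymentError as exc:
    # One handler now covers the download, decompression and
    # partitioning failures raised above.
    print(exc)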
@ -13,7 +13,6 @@
import logging
import os

from ironic_lib import utils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import imageutils
@ -21,6 +20,7 @@ from oslo_utils import units
import tenacity

from ironic_python_agent import errors
from ironic_python_agent import utils

"""
Imported from ironic_lib/qemu-img.py from commit
@ -14,10 +14,10 @@ import copy
import re
import shlex

from ironic_lib import utils as il_utils
from oslo_concurrency import processutils
from oslo_log import log as logging

from ironic_python_agent import device_hints
from ironic_python_agent import disk_utils
from ironic_python_agent import errors
from ironic_python_agent import utils
@ -55,7 +55,7 @@ def get_block_devices_for_raid(block_devices, logical_disks):
matching = []
for phys_disk in logical_disk['physical_disks']:
candidates = [
dev['name'] for dev in il_utils.find_devices_by_hints(
dev['name'] for dev in device_hints.find_devices_by_hints(
serialized_devs, phys_disk)
]
if not candidates:
@ -404,7 +404,7 @@ def prepare_boot_partitions_for_softraid(device, holders, efi_part,
utils.execute('wipefs', '-a', efi_part)
else:
fslabel = 'efi-part'
il_utils.mkfs(fs='vfat', path=md_device, label=fslabel)
utils.mkfs(fs='vfat', path=md_device, label=fslabel)

return md_device
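find_devices_by_hints() moves from ironic_lib.utils to the new device_hints module with the same semantics: device sizes are reported in bytes while size hints are compared in GiB, as the tests later in this change exercise. A short sketch:

from ironic_python_agent import device_hints

devices = [{'name': '/dev/sda', 'size': 64424509440},   # 60 GiB
           {'name': '/dev/sdb', 'size': 128849018880}]  # 120 GiB
matches = list(device_hints.find_devices_by_hints(devices,
                                                  {'size': '>= 100'}))
# -> only /dev/sdb matches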
@ -17,7 +17,6 @@

import subprocess

import ironic_lib
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_config import fixture as config_fixture
@ -28,6 +27,7 @@ from oslotest import base as test_base
from ironic_python_agent import config
from ironic_python_agent.extensions import base as ext_base
from ironic_python_agent import hardware
from ironic_python_agent import utils

CONF = cfg.CONF

@ -56,12 +56,11 @@ class IronicAgentTest(test_base.BaseTestCase):
# mock it causes things to not work correctly. As doing an
# autospec=True causes strangeness. By using a simple function we
# can then mock it without issue.
self.patch(ironic_lib.utils, 'execute', do_not_call)
self.patch(utils, 'execute', do_not_call)
self.patch(processutils, 'execute', do_not_call)
self.patch(subprocess, 'call', do_not_call)
self.patch(subprocess, 'check_call', do_not_call)
self.patch(subprocess, 'check_output', do_not_call)
# ironic_python_agent.utils.execute is an alias of ironic_lib one

ext_base._EXT_MANAGER = None
hardware._CACHED_HW_INFO = None
@ -87,5 +86,5 @@ class IronicAgentTest(test_base.BaseTestCase):
def do_not_call(*args, **kwargs):
"""Helper function to raise an exception if it is called"""
raise Exception(
"Don't call ironic_lib.utils.execute() / "
"Don't call utils.execute() / "
"processutils.execute() or similar functions in tests!")
@ -18,7 +18,6 @@ import shutil
import tempfile
from unittest import mock

from ironic_lib import utils as ilib_utils
from oslo_concurrency import processutils

from ironic_python_agent import disk_utils
@ -30,13 +29,14 @@ from ironic_python_agent import partition_utils
from ironic_python_agent import raid_utils
from ironic_python_agent.tests.unit import base
from ironic_python_agent.tests.unit.samples import hardware_samples as hws
from ironic_python_agent import utils


EFI_RESULT = ''.encode('utf-16')


@mock.patch.object(hardware, 'dispatch_to_managers', autospec=True)
@mock.patch.object(ilib_utils, 'execute', autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
@mock.patch.object(tempfile, 'mkdtemp', lambda *_: '/tmp/fake-dir')
@mock.patch.object(shutil, 'rmtree', lambda *_: None)
class TestImageExtension(base.IronicAgentTest):
@ -17,7 +17,6 @@ import tempfile
import time
from unittest import mock

from ironic_lib import exception
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import units
@ -338,7 +337,7 @@ class TestStandbyExtension(base.IronicAgentTest):
image_info = _build_fake_image_info()
validate_mock.return_value = (image_info['disk_format'], 0)

fix_gpt_mock.side_effect = exception.InstanceDeployFailure
fix_gpt_mock.side_effect = errors.DeploymentError
standby._write_image(image_info, device)

@mock.patch('ironic_python_agent.qemu_img.convert_image', autospec=True)
@ -896,7 +895,7 @@ class TestStandbyExtension(base.IronicAgentTest):

@mock.patch('ironic_python_agent.disk_utils.get_disk_identifier',
lambda dev: 'ROOT')
@mock.patch('ironic_lib.utils.execute', autospec=True)
@mock.patch('ironic_python_agent.utils.execute', autospec=True)
@mock.patch('ironic_python_agent.disk_utils.list_partitions',
autospec=True)
@mock.patch.object(partition_utils, 'create_config_drive_partition',
@ -947,7 +946,7 @@ class TestStandbyExtension(base.IronicAgentTest):
self.assertEqual({'root uuid': 'ROOT'},
self.agent_extension.partition_uuids)

@mock.patch('ironic_lib.utils.execute', autospec=True)
@mock.patch('ironic_python_agent.utils.execute', autospec=True)
@mock.patch('ironic_python_agent.disk_utils.list_partitions',
autospec=True)
@mock.patch.object(partition_utils, 'create_config_drive_partition',
@ -1020,7 +1019,7 @@ class TestStandbyExtension(base.IronicAgentTest):

@mock.patch('ironic_python_agent.disk_utils.get_disk_identifier',
lambda dev: 'ROOT')
@mock.patch('ironic_lib.utils.execute', autospec=True)
@mock.patch('ironic_python_agent.utils.execute', autospec=True)
@mock.patch.object(partition_utils, 'create_config_drive_partition',
autospec=True)
@mock.patch('ironic_python_agent.disk_utils.list_partitions',
@ -1112,7 +1111,7 @@ class TestStandbyExtension(base.IronicAgentTest):

@mock.patch('ironic_python_agent.disk_utils.get_disk_identifier',
side_effect=OSError, autospec=True)
@mock.patch('ironic_lib.utils.execute',
@mock.patch('ironic_python_agent.utils.execute',
autospec=True)
@mock.patch('ironic_python_agent.disk_utils.list_partitions',
autospec=True)
@ -1164,7 +1163,7 @@ class TestStandbyExtension(base.IronicAgentTest):
attempts=mock.ANY)
self.assertEqual({}, self.agent_extension.partition_uuids)

@mock.patch('ironic_lib.utils.execute', mock.Mock())
@mock.patch('ironic_python_agent.utils.execute', mock.Mock())
@mock.patch('ironic_python_agent.disk_utils.list_partitions',
lambda _dev: [mock.Mock()])
@mock.patch('ironic_python_agent.disk_utils.get_disk_identifier',
@ -15,8 +15,8 @@

from oslo_config import cfg

from ironic_python_agent import errors
from ironic_python_agent.metrics_lib import metrics as metricslib
from ironic_python_agent.metrics_lib import metrics_exception as exception
from ironic_python_agent.metrics_lib import metrics_statsd
from ironic_python_agent.metrics_lib import metrics_utils
from ironic_python_agent.tests.unit import base
@ -40,15 +40,15 @@ class TestGetLogger(base.IronicAgentTest):
CONF.clear_override('backend', group='metrics')

def test_nonexisting_backend(self):
self.assertRaises(exception.InvalidMetricConfig,
self.assertRaises(errors.InvalidMetricConfig,
metrics_utils.get_metrics_logger, 'foo', 'test')

def test_numeric_prefix(self):
self.assertRaises(exception.InvalidMetricConfig,
self.assertRaises(errors.InvalidMetricConfig,
metrics_utils.get_metrics_logger, 1)

def test_numeric_list_prefix(self):
self.assertRaises(exception.InvalidMetricConfig,
self.assertRaises(errors.InvalidMetricConfig,
metrics_utils.get_metrics_logger, (1, 2))

def test_default_prefix(self):
@ -19,7 +19,6 @@ import time
from unittest import mock
from unittest.mock import sentinel

from ironic_lib import exception as lib_exc
from oslo_concurrency import processutils
from oslo_config import cfg
from stevedore import extension
@ -323,7 +322,7 @@ class TestBaseAgent(ironic_agent_base.IronicAgentTest):
self.agent.heartbeater.start.assert_called_once_with()
self.assertTrue(CONF.md5_enabled)

@mock.patch('ironic_lib.mdns.get_endpoint', autospec=True)
@mock.patch('ironic_python_agent.mdns.get_endpoint', autospec=True)
@mock.patch(
'ironic_python_agent.hardware_managers.cna._detect_cna_card',
mock.Mock())
@ -378,7 +377,7 @@ class TestBaseAgent(ironic_agent_base.IronicAgentTest):
mock_dispatch.call_args_list)
self.agent.heartbeater.start.assert_called_once_with()

@mock.patch('ironic_lib.mdns.get_endpoint', autospec=True)
@mock.patch('ironic_python_agent.mdns.get_endpoint', autospec=True)
@mock.patch(
'ironic_python_agent.hardware_managers.cna._detect_cna_card',
mock.Mock())
@ -625,7 +624,7 @@ class TestBaseAgent(ironic_agent_base.IronicAgentTest):
self.agent.heartbeater.start.assert_called_once_with()

@mock.patch.object(hardware, '_enable_multipath', autospec=True)
@mock.patch('ironic_lib.mdns.get_endpoint', autospec=True)
@mock.patch('ironic_python_agent.mdns.get_endpoint', autospec=True)
@mock.patch(
'ironic_python_agent.hardware_managers.cna._detect_cna_card',
mock.Mock())
@ -644,7 +643,7 @@ class TestBaseAgent(ironic_agent_base.IronicAgentTest):
mock_wait,
mock_mdns,
mock_mpath):
mock_mdns.side_effect = lib_exc.ServiceLookupFailure()
mock_mdns.side_effect = errors.ServiceLookupFailure()
# If inspection_callback_url is configured and api_url is not when the
# agent starts, ensure that the inspection will be called and wsgi
# server will work as usual. Also, make sure api_client and heartbeater
@ -684,7 +683,7 @@ class TestBaseAgent(ironic_agent_base.IronicAgentTest):
self.assertFalse(mock_dispatch.called)

@mock.patch.object(hardware, '_enable_multipath', autospec=True)
@mock.patch('ironic_lib.mdns.get_endpoint', autospec=True)
@mock.patch('ironic_python_agent.mdns.get_endpoint', autospec=True)
@mock.patch(
'ironic_python_agent.hardware_managers.cna._detect_cna_card',
mock.Mock())
@ -703,7 +702,7 @@ class TestBaseAgent(ironic_agent_base.IronicAgentTest):
mock_wait,
mock_mdns,
mock_mpath):
mock_mdns.side_effect = lib_exc.ServiceLookupFailure()
mock_mdns.side_effect = errors.ServiceLookupFailure()
# If both api_url and inspection_callback_url are not configured when
# the agent starts, ensure that the inspection will be skipped and wsgi
# server will work as usual. Also, make sure api_client and heartbeater
@ -14,20 +14,19 @@
import subprocess
from unittest import mock

import ironic_lib
from oslo_concurrency import processutils

from ironic_python_agent.tests.unit import base as ironic_agent_base
from ironic_python_agent.tests.unit import base
from ironic_python_agent import utils


class BlockExecuteTestCase(ironic_agent_base.IronicAgentTest):
class BlockExecuteTestCase(base.IronicAgentTest):
"""Test to ensure we block access to the 'execute' type functions"""

def test_exception_raised_for_execute(self):
execute_functions = (ironic_lib.utils.execute, processutils.execute,
subprocess.call, subprocess.check_call,
subprocess.check_output, utils.execute)
execute_functions = (processutils.execute, subprocess.call,
subprocess.check_call, subprocess.check_output,
utils.execute)

for function_name in execute_functions:
exc = self.assertRaises(Exception, function_name, ["echo", "%s" % function_name])  # noqa
@ -35,7 +34,7 @@ class BlockExecuteTestCase(ironic_agent_base.IronicAgentTest):
# get H202 error in 'pep8' check.

self.assertEqual(
"Don't call ironic_lib.utils.execute() / "
"Don't call utils.execute() / "
"processutils.execute() or similar functions in tests!",
"%s" % exc)

@ -51,17 +50,14 @@ class BlockExecuteTestCase(ironic_agent_base.IronicAgentTest):
self.assertEqual(2, mock_exec.call_count)


class DontBlockExecuteTestCase(ironic_agent_base.IronicAgentTest):
class DontBlockExecuteTestCase(base.IronicAgentTest):
"""Ensure we can turn off blocking access to 'execute' type functions"""

# Don't block the execute function
block_execute = False

@mock.patch.object(ironic_lib.utils, "execute", autospec=True)
@mock.patch.object(utils, "execute", autospec=True)
def test_no_exception_raised_for_execute(self, mock_exec):
# Make sure we can call utils.execute() even though we didn't mock it.
# We do mock ironic_lib.utils.execute() so we don't actually execute
# anything.
utils.execute("ls")
utils.execute("echo")
self.assertEqual(2, mock_exec.call_count)
@ -12,7 +12,6 @@

from unittest import mock

from ironic_lib import utils
from oslo_concurrency import processutils
from tooz import coordination

@ -20,6 +19,7 @@ from ironic_python_agent import burnin
from ironic_python_agent import errors
from ironic_python_agent import hardware
from ironic_python_agent.tests.unit import base
from ironic_python_agent import utils


SMART_OUTPUT_JSON_COMPLETED = ("""
330
ironic_python_agent/tests/unit/test_device_hints.py
Normal file
@ -0,0 +1,330 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy

from ironic_python_agent import device_hints
from ironic_python_agent.tests.unit import base


class ParseRootDeviceTestCase(base.IronicAgentTest):

def test_parse_root_device_hints_without_operators(self):
root_device = {
'wwn': '123456', 'model': 'FOO model', 'size': 12345,
'serial': 'foo-serial', 'vendor': 'foo VENDOR with space',
'name': '/dev/sda', 'wwn_with_extension': '123456111',
'wwn_vendor_extension': '111', 'rotational': True,
'hctl': '1:0:0:0', 'by_path': '/dev/disk/by-path/1:0:0:0'}
result = device_hints.parse_root_device_hints(root_device)
expected = {
'wwn': 's== 123456', 'model': 's== foo%20model',
'size': '== 12345', 'serial': 's== foo-serial',
'vendor': 's== foo%20vendor%20with%20space',
'name': 's== /dev/sda', 'wwn_with_extension': 's== 123456111',
'wwn_vendor_extension': 's== 111', 'rotational': True,
'hctl': 's== 1%3A0%3A0%3A0',
'by_path': 's== /dev/disk/by-path/1%3A0%3A0%3A0'}
self.assertEqual(expected, result)

def test_parse_root_device_hints_with_operators(self):
root_device = {
'wwn': 's== 123456', 'model': 's== foo MODEL', 'size': '>= 12345',
'serial': 's!= foo-serial', 'vendor': 's== foo VENDOR with space',
'name': '<or> /dev/sda <or> /dev/sdb',
'wwn_with_extension': 's!= 123456111',
'wwn_vendor_extension': 's== 111', 'rotational': True,
'hctl': 's== 1:0:0:0', 'by_path': 's== /dev/disk/by-path/1:0:0:0'}

# Validate strings being normalized
expected = copy.deepcopy(root_device)
expected['model'] = 's== foo%20model'
expected['vendor'] = 's== foo%20vendor%20with%20space'
expected['hctl'] = 's== 1%3A0%3A0%3A0'
expected['by_path'] = 's== /dev/disk/by-path/1%3A0%3A0%3A0'

result = device_hints.parse_root_device_hints(root_device)
# The hints already contain the operators, make sure we keep it
self.assertEqual(expected, result)

def test_parse_root_device_hints_string_compare_operator_name(self):
root_device = {'name': 's== /dev/sdb'}
# Validate strings being normalized
expected = copy.deepcopy(root_device)
result = device_hints.parse_root_device_hints(root_device)
# The hints already contain the operators, make sure we keep it
self.assertEqual(expected, result)

def test_parse_root_device_hints_no_hints(self):
result = device_hints.parse_root_device_hints({})
self.assertIsNone(result)

def test_parse_root_device_hints_convert_size(self):
for size in (12345, '12345'):
result = device_hints.parse_root_device_hints({'size': size})
self.assertEqual({'size': '== 12345'}, result)

def test_parse_root_device_hints_invalid_size(self):
for value in ('not-int', -123, 0):
self.assertRaises(ValueError, device_hints.parse_root_device_hints,
{'size': value})

def test_parse_root_device_hints_int_or(self):
expr = '<or> 123 <or> 456 <or> 789'
result = device_hints.parse_root_device_hints({'size': expr})
self.assertEqual({'size': expr}, result)

def test_parse_root_device_hints_int_or_invalid(self):
expr = '<or> 123 <or> non-int <or> 789'
self.assertRaises(ValueError, device_hints.parse_root_device_hints,
{'size': expr})

def test_parse_root_device_hints_string_or_space(self):
expr = '<or> foo <or> foo bar <or> bar'
expected = '<or> foo <or> foo%20bar <or> bar'
result = device_hints.parse_root_device_hints({'model': expr})
self.assertEqual({'model': expected}, result)

def _parse_root_device_hints_convert_rotational(self, values,
expected_value):
for value in values:
result = device_hints.parse_root_device_hints(
{'rotational': value})
self.assertEqual({'rotational': expected_value}, result)

def test_parse_root_device_hints_convert_rotational(self):
self._parse_root_device_hints_convert_rotational(
(True, 'true', 'on', 'y', 'yes'), True)

self._parse_root_device_hints_convert_rotational(
(False, 'false', 'off', 'n', 'no'), False)

def test_parse_root_device_hints_invalid_rotational(self):
self.assertRaises(ValueError, device_hints.parse_root_device_hints,
{'rotational': 'not-bool'})

def test_parse_root_device_hints_invalid_wwn(self):
self.assertRaises(ValueError, device_hints.parse_root_device_hints,
{'wwn': 123})

def test_parse_root_device_hints_invalid_wwn_with_extension(self):
self.assertRaises(ValueError, device_hints.parse_root_device_hints,
{'wwn_with_extension': 123})

def test_parse_root_device_hints_invalid_wwn_vendor_extension(self):
self.assertRaises(ValueError, device_hints.parse_root_device_hints,
{'wwn_vendor_extension': 123})

def test_parse_root_device_hints_invalid_model(self):
self.assertRaises(ValueError, device_hints.parse_root_device_hints,
{'model': 123})

def test_parse_root_device_hints_invalid_serial(self):
self.assertRaises(ValueError, device_hints.parse_root_device_hints,
{'serial': 123})

def test_parse_root_device_hints_invalid_vendor(self):
self.assertRaises(ValueError, device_hints.parse_root_device_hints,
{'vendor': 123})

def test_parse_root_device_hints_invalid_name(self):
self.assertRaises(ValueError, device_hints.parse_root_device_hints,
{'name': 123})

def test_parse_root_device_hints_invalid_hctl(self):
self.assertRaises(ValueError, device_hints.parse_root_device_hints,
{'hctl': 123})

def test_parse_root_device_hints_invalid_by_path(self):
self.assertRaises(ValueError, device_hints.parse_root_device_hints,
{'by_path': 123})

def test_parse_root_device_hints_non_existent_hint(self):
self.assertRaises(ValueError, device_hints.parse_root_device_hints,
{'non-existent': 'foo'})

def test_extract_hint_operator_and_values_single_value(self):
expected = {'op': '>=', 'values': ['123']}
self.assertEqual(
expected, device_hints._extract_hint_operator_and_values(
'>= 123', 'size'))

def test_extract_hint_operator_and_values_multiple_values(self):
expected = {'op': '<or>', 'values': ['123', '456', '789']}
expr = '<or> 123 <or> 456 <or> 789'
self.assertEqual(
expected,
device_hints._extract_hint_operator_and_values(expr, 'size'))

def test_extract_hint_operator_and_values_multiple_values_space(self):
expected = {'op': '<or>', 'values': ['foo', 'foo bar', 'bar']}
expr = '<or> foo <or> foo bar <or> bar'
self.assertEqual(
expected,
device_hints._extract_hint_operator_and_values(expr, 'model'))

def test_extract_hint_operator_and_values_no_operator(self):
expected = {'op': '', 'values': ['123']}
self.assertEqual(
expected,
device_hints._extract_hint_operator_and_values('123', 'size'))

def test_extract_hint_operator_and_values_empty_value(self):
self.assertRaises(
ValueError,
device_hints._extract_hint_operator_and_values, '', 'size')

def test_extract_hint_operator_and_values_integer(self):
expected = {'op': '', 'values': ['123']}
self.assertEqual(
expected,
device_hints._extract_hint_operator_and_values(123, 'size'))

def test__append_operator_to_hints(self):
root_device = {'serial': 'foo', 'size': 12345,
'model': 'foo model', 'rotational': True}
expected = {'serial': 's== foo', 'size': '== 12345',
'model': 's== foo model', 'rotational': True}

result = device_hints._append_operator_to_hints(root_device)
self.assertEqual(expected, result)

def test_normalize_hint_expression_or(self):
expr = '<or> foo <or> foo bar <or> bar'
expected = '<or> foo <or> foo%20bar <or> bar'
result = device_hints._normalize_hint_expression(expr, 'model')
self.assertEqual(expected, result)

def test_normalize_hint_expression_in(self):
expr = '<in> foo <in> foo bar <in> bar'
expected = '<in> foo <in> foo%20bar <in> bar'
result = device_hints._normalize_hint_expression(expr, 'model')
self.assertEqual(expected, result)

def test_normalize_hint_expression_op_space(self):
expr = 's== test string with space'
expected = 's== test%20string%20with%20space'
result = device_hints._normalize_hint_expression(expr, 'model')
self.assertEqual(expected, result)

def test_normalize_hint_expression_op_no_space(self):
expr = 's!= SpongeBob'
expected = 's!= spongebob'
result = device_hints._normalize_hint_expression(expr, 'model')
self.assertEqual(expected, result)

def test_normalize_hint_expression_no_op_space(self):
expr = 'no operators'
expected = 'no%20operators'
result = device_hints._normalize_hint_expression(expr, 'model')
self.assertEqual(expected, result)

def test_normalize_hint_expression_no_op_no_space(self):
expr = 'NoSpace'
expected = 'nospace'
result = device_hints._normalize_hint_expression(expr, 'model')
self.assertEqual(expected, result)

def test_normalize_hint_expression_empty_value(self):
self.assertRaises(
ValueError, device_hints._normalize_hint_expression, '', 'size')


class MatchRootDeviceTestCase(base.IronicAgentTest):

def setUp(self):
super(MatchRootDeviceTestCase, self).setUp()
self.devices = [
{'name': '/dev/sda', 'size': 64424509440, 'model': 'ok model',
'serial': 'fakeserial'},
{'name': '/dev/sdb', 'size': 128849018880, 'model': 'big model',
'serial': 'veryfakeserial', 'rotational': 'yes'},
{'name': '/dev/sdc', 'size': 10737418240, 'model': 'small model',
'serial': 'veryveryfakeserial', 'rotational': False},
]

def test_match_root_device_hints_one_hint(self):
root_device_hints = {'size': '>= 70'}
dev = device_hints.match_root_device_hints(
self.devices, root_device_hints)
self.assertEqual('/dev/sdb', dev['name'])

def test_match_root_device_hints_rotational(self):
root_device_hints = {'rotational': False}
dev = device_hints.match_root_device_hints(
self.devices, root_device_hints)
self.assertEqual('/dev/sdc', dev['name'])

def test_match_root_device_hints_rotational_convert_devices_bool(self):
root_device_hints = {'size': '>=100', 'rotational': True}
dev = device_hints.match_root_device_hints(
self.devices, root_device_hints)
self.assertEqual('/dev/sdb', dev['name'])

def test_match_root_device_hints_multiple_hints(self):
root_device_hints = {'size': '>= 50', 'model': 's==big model',
'serial': 's==veryfakeserial'}
dev = device_hints.match_root_device_hints(
self.devices, root_device_hints)
self.assertEqual('/dev/sdb', dev['name'])

def test_match_root_device_hints_multiple_hints2(self):
root_device_hints = {
'size': '<= 20',
'model': '<or> model 5 <or> foomodel <or> small model <or>',
'serial': 's== veryveryfakeserial'}
dev = device_hints.match_root_device_hints(
self.devices, root_device_hints)
self.assertEqual('/dev/sdc', dev['name'])

def test_match_root_device_hints_multiple_hints3(self):
root_device_hints = {'rotational': False, 'model': '<in> small'}
dev = device_hints.match_root_device_hints(
self.devices, root_device_hints)
self.assertEqual('/dev/sdc', dev['name'])

def test_match_root_device_hints_no_operators(self):
root_device_hints = {'size': '120', 'model': 'big model',
'serial': 'veryfakeserial'}
dev = device_hints.match_root_device_hints(
self.devices, root_device_hints)
self.assertEqual('/dev/sdb', dev['name'])

def test_match_root_device_hints_no_device_found(self):
root_device_hints = {'size': '>=50', 'model': 's==foo'}
dev = device_hints.match_root_device_hints(
self.devices, root_device_hints)
self.assertIsNone(dev)

def test_match_root_device_hints_empty_device_attribute(self):
empty_dev = [{'name': '/dev/sda', 'model': ' '}]
dev = device_hints.match_root_device_hints(
empty_dev, {'model': 'foo'})
self.assertIsNone(dev)

def test_find_devices_all(self):
root_device_hints = {'size': '>= 10'}
devs = list(device_hints.find_devices_by_hints(self.devices,
root_device_hints))
self.assertEqual(self.devices, devs)

def test_find_devices_none(self):
root_device_hints = {'size': '>= 100500'}
devs = list(device_hints.find_devices_by_hints(self.devices,
root_device_hints))
self.assertEqual([], devs)

def test_find_devices_name(self):
root_device_hints = {'name': 's== /dev/sda'}
devs = list(device_hints.find_devices_by_hints(self.devices,
root_device_hints))
self.assertEqual([self.devices[0]], devs)
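For reference, the normalization these tests pin down can be seen directly (values are taken from the cases above):

from ironic_python_agent import device_hints

hints = device_hints.parse_root_device_hints(
    {'model': 'FOO model', 'size': 12345})
# Strings are lower-cased and URL-encoded, and a default operator is
# prepended:
# -> {'model': 's== foo%20model', 'size': '== 12345'}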
@ -15,17 +15,16 @@

from unittest import mock

from ironic_lib import exception
from ironic_lib.tests import base
from ironic_lib import utils

from ironic_python_agent import disk_partitioner
from ironic_python_agent import errors
from ironic_python_agent.tests.unit import base
from ironic_python_agent import utils


CONF = disk_partitioner.CONF


class DiskPartitionerTestCase(base.IronicLibTestCase):
class DiskPartitionerTestCase(base.IronicAgentTest):

def test_add_partition(self):
dp = disk_partitioner.DiskPartitioner('/dev/fake')
@ -157,7 +156,7 @@ class DiskPartitionerTestCase(base.IronicLibTestCase):
# Test as if the 'busybox' version of 'fuser' which does not have
# stderr output
mock_utils_exc.return_value = ("10000 10001", '')
self.assertRaises(exception.InstanceDeployFailure, dp.commit)
self.assertRaises(errors.DeploymentError, dp.commit)

mock_disk_partitioner_exec.assert_called_once_with(
mock.ANY, 'mklabel', 'msdos',
@ -190,7 +189,7 @@ class DiskPartitionerTestCase(base.IronicLibTestCase):
mock_gp.return_value = fake_parts
mock_utils_exc.return_value = ('', "Specified filename /dev/fake"
" does not exist.")
self.assertRaises(exception.InstanceDeployFailure, dp.commit)
self.assertRaises(errors.DeploymentError, dp.commit)

mock_disk_partitioner_exec.assert_called_once_with(
mock.ANY, 'mklabel', 'msdos',
@ -18,8 +18,6 @@ import os
import stat
from unittest import mock

from ironic_lib import exception
from ironic_lib import utils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils.imageutils import format_inspector
@ -27,9 +25,10 @@ from oslo_utils.imageutils import QemuImgInfo
from oslo_utils import units

from ironic_python_agent import disk_utils
from ironic_python_agent.errors import InvalidImage
from ironic_python_agent import errors
from ironic_python_agent import qemu_img
from ironic_python_agent.tests.unit import base
from ironic_python_agent import utils

CONF = cfg.CONF

@ -559,7 +558,7 @@ class OtherFunctionTestCase(base.IronicAgentTest):
def test_is_block_device_raises(self, mock_os):
device = '/dev/disk/by-path/ip-1.2.3.4:5678-iscsi-iqn.fake-lun-9'
mock_os.side_effect = OSError
self.assertRaises(exception.InstanceDeployFailure,
self.assertRaises(errors.DeploymentError,
disk_utils.is_block_device, device)
mock_os.assert_has_calls([mock.call(device)] * 3)

@ -569,7 +568,7 @@ class OtherFunctionTestCase(base.IronicAgentTest):
group='disk_utils')
device = '/dev/disk/by-path/ip-1.2.3.4:5678-iscsi-iqn.fake-lun-9'
mock_os.side_effect = OSError
self.assertRaises(exception.InstanceDeployFailure,
self.assertRaises(errors.DeploymentError,
disk_utils.is_block_device, device)
mock_os.assert_has_calls([mock.call(device)] * 2)

@ -656,7 +655,7 @@ Identified 1 problems!
@mock.patch.object(disk_utils.LOG, 'error', autospec=True)
def test_fix_gpt_structs_exc(self, mock_log, mock_execute):
mock_execute.side_effect = processutils.ProcessExecutionError
self.assertRaisesRegex(exception.InstanceDeployFailure,
self.assertRaisesRegex(errors.DeploymentError,
'Failed to fix GPT data structures on disk',
disk_utils._fix_gpt_structs,
self.dev, self.node_uuid)
@ -854,7 +853,7 @@ class WaitForDisk(base.IronicAgentTest):
side_effect=processutils.ProcessExecutionError(
stderr='fake'))
def test_wait_for_disk_to_become_available_no_fuser(self, mock_exc):
self.assertRaises(exception.IronicException,
self.assertRaises(errors.RESTError,
disk_utils.wait_for_disk_to_become_available,
'fake-dev')
fuser_cmd = ['fuser', 'fake-dev']
@ -878,7 +877,7 @@ class WaitForDisk(base.IronicAgentTest):
'holding device fake-dev: 15503, 3919, 15510, '
'15511. Timed out waiting for completion.')
self.assertRaisesRegex(
exception.IronicException,
errors.RESTError,
expected_error,
disk_utils.wait_for_disk_to_become_available,
'fake-dev')
@ -903,7 +902,7 @@ class WaitForDisk(base.IronicAgentTest):
'holding device fake-dev: 15503, 3919, 15510, '
'15511. Timed out waiting for completion.')
self.assertRaisesRegex(
exception.IronicException,
errors.RESTError,
expected_error,
disk_utils.wait_for_disk_to_become_available,
'fake-dev')
@ -925,7 +924,7 @@ class WaitForDisk(base.IronicAgentTest):
'locks for device fake-dev. Timed out waiting '
'for completion.')
self.assertRaisesRegex(
exception.IronicException,
errors.RESTError,
expected_error,
disk_utils.wait_for_disk_to_become_available,
'fake-dev')
@ -999,7 +998,7 @@ class GetAndValidateImageFormat(base.IronicAgentTest):
CONF.set_override('disable_deep_image_inspection', False)
fmt = 'qcow3'
mock_ii.return_value = MockFormatInspectorCls(fmt, 0, True)
self.assertRaises(InvalidImage,
self.assertRaises(errors.InvalidImage,
disk_utils.get_and_validate_image_format,
'/fake/path', fmt)
mock_ii.assert_called_once_with('/fake/path')
@ -1010,7 +1009,7 @@ class GetAndValidateImageFormat(base.IronicAgentTest):
CONF.set_override('disable_deep_image_inspection', False)
fmt = 'qcow2'
mock_ii.return_value = MockFormatInspectorCls('qcow3', 0, True)
self.assertRaises(InvalidImage,
self.assertRaises(errors.InvalidImage,
disk_utils.get_and_validate_image_format,
'/fake/path', fmt)

@ -1058,11 +1057,11 @@ class ImageInspectionTest(base.IronicAgentTest):
def test_image_inspection_fail_safety_check(self, mock_fi):
inspector = MockFormatInspectorCls('qcow2', 0, False)
mock_fi.return_value = inspector
self.assertRaises(InvalidImage, disk_utils._image_inspection,
self.assertRaises(errors.InvalidImage, disk_utils._image_inspection,
'/fake/path')

@mock.patch.object(format_inspector, 'detect_file_format', autospec=True)
def test_image_inspection_fail_format_error(self, mock_fi):
mock_fi.side_effect = format_inspector.ImageFormatError
self.assertRaises(InvalidImage, disk_utils._image_inspection,
self.assertRaises(errors.InvalidImage, disk_utils._image_inspection,
'/fake/path')
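The looser assertions here rely on the more specific errors being part of the agent's RESTError hierarchy; a hedged illustration of that assumption:

from ironic_python_agent import errors

# assertRaises(errors.RESTError, ...) passes for DeploymentError and
# InvalidImage only if these subclass relationships hold.
assert issubclass(errors.DeploymentError, errors.RESTError)
assert issubclass(errors.InvalidImage, errors.RESTError)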
@ -14,8 +14,6 @@

import json

from ironic_lib import exception as lib_exc

from ironic_python_agent import encoding
from ironic_python_agent.tests.unit import base

@ -73,10 +71,3 @@ class TestEncoder(base.IronicAgentTest):
        expected = {'jack': 'hello', 'jill': 'world'}
        obj = SerializableTesting('hello', 'world')
        self.assertEqual(expected, json.loads(self.encoder.encode(obj)))

    def test_ironic_lib(self):
        obj = lib_exc.InstanceDeployFailure(reason='boom')
        encoded = json.loads(self.encoder.encode(obj))
        self.assertEqual(500, encoded['code'])
        self.assertEqual('InstanceDeployFailure', encoded['type'])
        self.assertIn('boom', encoded['message'])
File diff suppressed because it is too large
@ -114,7 +114,7 @@ class TestInspect(base.IronicAgentTest):
        mock_call.assert_not_called()
        self.assertIsNone(result)

    @mock.patch('ironic_lib.mdns.get_endpoint', autospec=True)
    @mock.patch('ironic_python_agent.mdns.get_endpoint', autospec=True)
    def test_mdns(self, mock_mdns, mock_ext_mgr, mock_call):
        CONF.set_override('inspection_callback_url', 'mdns')
        mock_mdns.return_value = 'http://example', {
234
ironic_python_agent/tests/unit/test_mdns.py
Normal file
@ -0,0 +1,234 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import socket
from unittest import mock

from oslo_config import cfg

from ironic_python_agent import errors
from ironic_python_agent import mdns
from ironic_python_agent.tests.unit.base import IronicAgentTest

CONF = cfg.CONF


@mock.patch('ironic_python_agent.utils.get_route_source', autospec=True)
@mock.patch('zeroconf.Zeroconf', autospec=True)
class GetEndpointTestCase(IronicAgentTest):
    def test_simple(self, mock_zc, mock_route):
        mock_zc.return_value.get_service_info.return_value = mock.Mock(
            address=socket.inet_aton('192.168.1.1'),
            port=80,
            properties={},
            **{'parsed_addresses.return_value': ['192.168.1.1']}
        )

        endp, params = mdns.get_endpoint('baremetal')
        self.assertEqual('http://192.168.1.1:80', endp)
        self.assertEqual({}, params)
        mock_zc.return_value.get_service_info.assert_called_once_with(
            'baremetal._openstack._tcp.local.',
            'baremetal._openstack._tcp.local.'
        )
        mock_zc.return_value.close.assert_called_once_with()

    def test_v6(self, mock_zc, mock_route):
        mock_zc.return_value.get_service_info.return_value = mock.Mock(
            port=80,
            properties={},
            **{'parsed_addresses.return_value': ['::2']}
        )

        endp, params = mdns.get_endpoint('baremetal')
        self.assertEqual('http://[::2]:80', endp)
        self.assertEqual({}, params)
        mock_zc.return_value.get_service_info.assert_called_once_with(
            'baremetal._openstack._tcp.local.',
            'baremetal._openstack._tcp.local.'
        )
        mock_zc.return_value.close.assert_called_once_with()

    def test_skip_invalid(self, mock_zc, mock_route):
        mock_zc.return_value.get_service_info.return_value = mock.Mock(
            port=80,
            properties={},
            **{'parsed_addresses.return_value': ['::1', '::2', '::3']}
        )
        mock_route.side_effect = [None, '::4']

        endp, params = mdns.get_endpoint('baremetal')
        self.assertEqual('http://[::3]:80', endp)
        self.assertEqual({}, params)
        mock_zc.return_value.get_service_info.assert_called_once_with(
            'baremetal._openstack._tcp.local.',
            'baremetal._openstack._tcp.local.'
        )
        mock_zc.return_value.close.assert_called_once_with()
        self.assertEqual(2, mock_route.call_count)

    def test_fallback(self, mock_zc, mock_route):
        mock_zc.return_value.get_service_info.return_value = mock.Mock(
            port=80,
            properties={},
            **{'parsed_addresses.return_value': ['::2', '::3']}
        )
        mock_route.side_effect = [None, None]

        endp, params = mdns.get_endpoint('baremetal')
        self.assertEqual('http://[::2]:80', endp)
        self.assertEqual({}, params)
        mock_zc.return_value.get_service_info.assert_called_once_with(
            'baremetal._openstack._tcp.local.',
            'baremetal._openstack._tcp.local.'
        )
        mock_zc.return_value.close.assert_called_once_with()
        self.assertEqual(2, mock_route.call_count)

    def test_localhost_only(self, mock_zc, mock_route):
        mock_zc.return_value.get_service_info.return_value = mock.Mock(
            port=80,
            properties={},
            **{'parsed_addresses.return_value': ['::1']}
        )

        self.assertRaises(errors.ServiceLookupFailure,
                          mdns.get_endpoint, 'baremetal')
        mock_zc.return_value.get_service_info.assert_called_once_with(
            'baremetal._openstack._tcp.local.',
            'baremetal._openstack._tcp.local.'
        )
        mock_zc.return_value.close.assert_called_once_with()
        self.assertFalse(mock_route.called)

    def test_https(self, mock_zc, mock_route):
        mock_zc.return_value.get_service_info.return_value = mock.Mock(
            address=socket.inet_aton('192.168.1.1'),
            port=443,
            properties={},
            **{'parsed_addresses.return_value': ['192.168.1.1']}
        )

        endp, params = mdns.get_endpoint('baremetal')
        self.assertEqual('https://192.168.1.1:443', endp)
        self.assertEqual({}, params)
        mock_zc.return_value.get_service_info.assert_called_once_with(
            'baremetal._openstack._tcp.local.',
            'baremetal._openstack._tcp.local.'
        )

    def test_with_custom_port_and_path(self, mock_zc, mock_route):
        mock_zc.return_value.get_service_info.return_value = mock.Mock(
            address=socket.inet_aton('192.168.1.1'),
            port=8080,
            properties={b'path': b'/baremetal'},
            **{'parsed_addresses.return_value': ['192.168.1.1']}
        )

        endp, params = mdns.get_endpoint('baremetal')
        self.assertEqual('https://192.168.1.1:8080/baremetal', endp)
        self.assertEqual({}, params)
        mock_zc.return_value.get_service_info.assert_called_once_with(
            'baremetal._openstack._tcp.local.',
            'baremetal._openstack._tcp.local.'
        )

    def test_with_custom_port_path_and_protocol(self, mock_zc, mock_route):
        mock_zc.return_value.get_service_info.return_value = mock.Mock(
            address=socket.inet_aton('192.168.1.1'),
            port=8080,
            properties={b'path': b'/baremetal', b'protocol': b'http'},
            **{'parsed_addresses.return_value': ['192.168.1.1']}
        )

        endp, params = mdns.get_endpoint('baremetal')
        self.assertEqual('http://192.168.1.1:8080/baremetal', endp)
        self.assertEqual({}, params)
        mock_zc.return_value.get_service_info.assert_called_once_with(
            'baremetal._openstack._tcp.local.',
            'baremetal._openstack._tcp.local.'
        )

    def test_with_params(self, mock_zc, mock_route):
        mock_zc.return_value.get_service_info.return_value = mock.Mock(
            address=socket.inet_aton('192.168.1.1'),
            port=80,
            properties={b'ipa_debug': True},
            **{'parsed_addresses.return_value': ['192.168.1.1']}
        )

        endp, params = mdns.get_endpoint('baremetal')
        self.assertEqual('http://192.168.1.1:80', endp)
        self.assertEqual({'ipa_debug': True}, params)
        mock_zc.return_value.get_service_info.assert_called_once_with(
            'baremetal._openstack._tcp.local.',
            'baremetal._openstack._tcp.local.'
        )

    def test_binary_data(self, mock_zc, mock_route):
        mock_zc.return_value.get_service_info.return_value = mock.Mock(
            address=socket.inet_aton('192.168.1.1'),
            port=80,
            properties={b'ipa_debug': True, b'binary': b'\xe2\x28\xa1'},
            **{'parsed_addresses.return_value': ['192.168.1.1']}
        )

        endp, params = mdns.get_endpoint('baremetal')
        self.assertEqual('http://192.168.1.1:80', endp)
        self.assertEqual({'ipa_debug': True, 'binary': b'\xe2\x28\xa1'},
                         params)
        mock_zc.return_value.get_service_info.assert_called_once_with(
            'baremetal._openstack._tcp.local.',
            'baremetal._openstack._tcp.local.'
        )

    def test_invalid_key(self, mock_zc, mock_route):
        mock_zc.return_value.get_service_info.return_value = mock.Mock(
            address=socket.inet_aton('192.168.1.1'),
            port=80,
            properties={b'ipa_debug': True, b'\xc3\x28': b'value'},
            **{'parsed_addresses.return_value': ['192.168.1.1']}
        )

        self.assertRaisesRegex(errors.ServiceLookupFailure,
                               'Cannot decode key',
                               mdns.get_endpoint, 'baremetal')
        mock_zc.return_value.get_service_info.assert_called_once_with(
            'baremetal._openstack._tcp.local.',
            'baremetal._openstack._tcp.local.'
        )

    def test_with_server(self, mock_zc, mock_route):
        mock_zc.return_value.get_service_info.return_value = mock.Mock(
            address=socket.inet_aton('192.168.1.1'),
            port=443,
            server='openstack.example.com.',
            properties={},
            **{'parsed_addresses.return_value': ['192.168.1.1']}
        )

        endp, params = mdns.get_endpoint('baremetal')
        self.assertEqual('https://openstack.example.com:443', endp)
        self.assertEqual({}, params)
        mock_zc.return_value.get_service_info.assert_called_once_with(
            'baremetal._openstack._tcp.local.',
            'baremetal._openstack._tcp.local.'
        )

    @mock.patch('time.sleep', autospec=True)
    def test_not_found(self, mock_sleep, mock_zc, mock_route):
        mock_zc.return_value.get_service_info.return_value = None

        self.assertRaisesRegex(errors.ServiceLookupFailure,
                               'baremetal service',
                               mdns.get_endpoint, 'baremetal')
        self.assertEqual(CONF.mdns.lookup_attempts - 1, mock_sleep.call_count)
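
For reference, a hedged sketch of the caller-side pattern these tests pin down: the lookup either succeeds with an endpoint plus any extra TXT parameters, or raises IPA's own ServiceLookupFailure in place of the old ironic-lib exception:

from ironic_python_agent import errors
from ironic_python_agent import mdns

try:
    endpoint, params = mdns.get_endpoint('baremetal')
except errors.ServiceLookupFailure:
    # No record found, or no usable (non link-local) address advertised.
    endpoint, params = None, {}
else:
    # e.g. 'https://192.168.1.1:443' plus TXT properties such as ipa_debug.
    print(endpoint, params)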
@ -15,8 +15,6 @@ import shutil
import tempfile
from unittest import mock

from ironic_lib import exception
from ironic_lib import utils
from oslo_concurrency import processutils
from oslo_config import cfg
import requests
@ -28,6 +26,7 @@ from ironic_python_agent import hardware
from ironic_python_agent import partition_utils
from ironic_python_agent import qemu_img
from ironic_python_agent.tests.unit import base
from ironic_python_agent import utils


CONF = cfg.CONF
@ -114,7 +113,7 @@ class GetConfigdriveTestCase(base.IronicAgentTest):

    def test_get_configdrive_bad_url(self, mock_requests, mock_copy):
        mock_requests.side_effect = requests.exceptions.RequestException
        self.assertRaises(exception.InstanceDeployFailure,
        self.assertRaises(errors.DeploymentError,
                          partition_utils.get_configdrive,
                          'http://1.2.3.4/cd', 'fake-node-uuid')
        self.assertFalse(mock_copy.called)
@ -122,13 +121,13 @@ class GetConfigdriveTestCase(base.IronicAgentTest):
    def test_get_configdrive_bad_status_code(self, mock_requests, mock_copy):
        mock_requests.return_value = mock.MagicMock(text='Not found',
                                                    status_code=404)
        self.assertRaises(exception.InstanceDeployFailure,
        self.assertRaises(errors.DeploymentError,
                          partition_utils.get_configdrive,
                          'http://1.2.3.4/cd', 'fake-node-uuid')
        self.assertFalse(mock_copy.called)

    def test_get_configdrive_base64_error(self, mock_requests, mock_copy):
        self.assertRaises(exception.InstanceDeployFailure,
        self.assertRaises(errors.DeploymentError,
                          partition_utils.get_configdrive,
                          'malformed', 'fake-node-uuid')
        self.assertFalse(mock_copy.called)
@ -139,7 +138,7 @@ class GetConfigdriveTestCase(base.IronicAgentTest):
        mock_requests.return_value = mock.MagicMock(content='Zm9vYmFy',
                                                    status_code=200)
        mock_copy.side_effect = IOError
        self.assertRaises(exception.InstanceDeployFailure,
        self.assertRaises(errors.DeploymentError,
                          partition_utils.get_configdrive,
                          'http://1.2.3.4/cd', 'fake-node-uuid')
        mock_requests.assert_called_once_with('http://1.2.3.4/cd',
@ -212,7 +211,7 @@ class GetLabelledPartitionTestCases(base.IronicAgentTest):
                        'NAME="fake13" LABEL="%s"\n' %
                        (label, label))
        mock_execute.side_effect = [(None, ''), (lsblk_output, '')]
        self.assertRaisesRegex(exception.InstanceDeployFailure,
        self.assertRaisesRegex(errors.DeploymentError,
                               'fake .*fake12 .*fake13',
                               partition_utils.get_labelled_partition,
                               self.dev, self.config_part_label,
@ -228,7 +227,7 @@ class GetLabelledPartitionTestCases(base.IronicAgentTest):
    @mock.patch.object(partition_utils.LOG, 'error', autospec=True)
    def test_get_partition_exc(self, mock_log, mock_execute):
        mock_execute.side_effect = processutils.ProcessExecutionError
        self.assertRaisesRegex(exception.InstanceDeployFailure,
        self.assertRaisesRegex(errors.DeploymentError,
                               'Failed to retrieve partition labels',
                               partition_utils.get_labelled_partition,
                               self.dev, self.config_part_label,
@ -273,7 +272,7 @@ class IsDiskLargerThanMaxSizeTestCases(base.IronicAgentTest):
    @mock.patch.object(partition_utils.LOG, 'error', autospec=True)
    def test_is_disk_larger_than_max_size_exc(self, mock_log, mock_execute):
        mock_execute.side_effect = processutils.ProcessExecutionError
        self.assertRaisesRegex(exception.InstanceDeployFailure,
        self.assertRaisesRegex(errors.DeploymentError,
                               'Failed to get size of disk',
                               partition_utils._is_disk_larger_than_max_size,
                               self.dev, self.node_uuid)
@ -316,7 +315,7 @@ class WorkOnDiskTestCase(base.IronicAgentTest):

    def test_no_root_partition(self):
        self.mock_ibd.return_value = False
        self.assertRaises(exception.InstanceDeployFailure,
        self.assertRaises(errors.DeploymentError,
                          partition_utils.work_on_disk, self.dev, self.root_mb,
                          self.swap_mb, self.ephemeral_mb,
                          self.ephemeral_format, self.image_path,
@ -335,7 +334,7 @@ class WorkOnDiskTestCase(base.IronicAgentTest):
        self.mock_ibd.side_effect = iter([True, False])
        calls = [mock.call(self.root_part),
                 mock.call(self.swap_part)]
        self.assertRaises(exception.InstanceDeployFailure,
        self.assertRaises(errors.DeploymentError,
                          partition_utils.work_on_disk, self.dev, self.root_mb,
                          self.swap_mb, self.ephemeral_mb,
                          self.ephemeral_format, self.image_path,
@ -364,7 +363,7 @@ class WorkOnDiskTestCase(base.IronicAgentTest):
        calls = [mock.call(root_part),
                 mock.call(swap_part),
                 mock.call(ephemeral_part)]
        self.assertRaises(exception.InstanceDeployFailure,
        self.assertRaises(errors.DeploymentError,
                          partition_utils.work_on_disk, self.dev, self.root_mb,
                          self.swap_mb, ephemeral_mb, ephemeral_format,
                          self.image_path, self.node_uuid)
@ -395,7 +394,7 @@ class WorkOnDiskTestCase(base.IronicAgentTest):
        calls = [mock.call(root_part),
                 mock.call(swap_part),
                 mock.call(configdrive_part)]
        self.assertRaises(exception.InstanceDeployFailure,
        self.assertRaises(errors.DeploymentError,
                          partition_utils.work_on_disk, self.dev, self.root_mb,
                          self.swap_mb, self.ephemeral_mb,
                          self.ephemeral_format, self.image_path,
@ -1031,7 +1030,7 @@ class CreateConfigDriveTestCases(base.IronicAgentTest):
        # 2 primary partitions, 0 logical partitions
        mock_count.return_value = (2, 0)

        self.assertRaisesRegex(exception.InstanceDeployFailure,
        self.assertRaisesRegex(errors.DeploymentError,
                               'Disk partitioning failed on device',
                               partition_utils.create_config_drive_partition,
                               self.node_uuid, self.dev, config_url)
@ -1103,7 +1102,7 @@ class CreateConfigDriveTestCases(base.IronicAgentTest):

        mock_execute.side_effect = processutils.ProcessExecutionError

        self.assertRaisesRegex(exception.InstanceDeployFailure,
        self.assertRaisesRegex(errors.DeploymentError,
                               'Failed to create config drive on disk',
                               partition_utils.create_config_drive_partition,
                               self.node_uuid, self.dev, config_url)
@ -1161,7 +1160,7 @@ class CreateConfigDriveTestCases(base.IronicAgentTest):
        # 4 primary partitions, 0 logical partitions
        mock_count.return_value = (4, 0)

        self.assertRaisesRegex(exception.InstanceDeployFailure,
        self.assertRaisesRegex(errors.DeploymentError,
                               'Config drive cannot be created for node',
                               partition_utils.create_config_drive_partition,
                               self.node_uuid, self.dev, config_url)
@ -1191,7 +1190,7 @@ class CreateConfigDriveTestCases(base.IronicAgentTest):
        mock_get_configdrive.return_value = (configdrive_mb, configdrive_file)
        mock_get_labelled_partition.return_value = None

        self.assertRaisesRegex(exception.InstanceDeployFailure,
        self.assertRaisesRegex(errors.DeploymentError,
                               'Config drive size exceeds maximum limit',
                               partition_utils.create_config_drive_partition,
                               self.node_uuid, self.dev, config_url)
@ -1223,7 +1222,7 @@ class CreateConfigDriveTestCases(base.IronicAgentTest):
        mock_get_labelled_partition.return_value = None
        mock_count.side_effect = ValueError('Booooom')

        self.assertRaisesRegex(exception.InstanceDeployFailure,
        self.assertRaisesRegex(errors.DeploymentError,
                               'Failed to check the number of primary ',
                               partition_utils.create_config_drive_partition,
                               self.node_uuid, self.dev, config_url)
@ -1501,7 +1500,7 @@ class TestConfigDriveTestRecovery(base.IronicAgentTest):
                                          mock_execute):
        mock_mkfs.side_effect = processutils.ProcessExecutionError('boom')
        self.assertRaisesRegex(
            exception.InstanceDeployFailure,
            errors.DeploymentError,
            'A failure occurred while attempting to format.*',
            partition_utils._try_build_fat32_config_drive,
            self.fake_dev,
@ -1516,3 +1515,14 @@ class TestConfigDriveTestRecovery(base.IronicAgentTest):
        mock_mkfs.assert_called_once_with(fs='vfat', path=self.fake_dev,
                                          label='CONFIG-2')
        mock_copy.assert_not_called()


class IsHttpUrlTestCase(base.IronicAgentTest):

    def test__is_http_url(self):
        self.assertTrue(partition_utils._is_http_url('http://127.0.0.1'))
        self.assertTrue(partition_utils._is_http_url('https://127.0.0.1'))
        self.assertTrue(partition_utils._is_http_url('HTTP://127.1.2.3'))
        self.assertTrue(partition_utils._is_http_url('HTTPS://127.3.2.1'))
        self.assertFalse(partition_utils._is_http_url('Zm9vYmFy'))
        self.assertFalse(partition_utils._is_http_url('11111111'))
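
All of these assertions reduce to one rule: partition_utils now raises errors.DeploymentError (itself a RESTError) everywhere ironic-lib raised InstanceDeployFailure. A hedged sketch of the caller-side effect, using the get_configdrive call and arguments seen in the tests above:

from ironic_python_agent import errors
from ironic_python_agent import partition_utils

try:
    partition_utils.get_configdrive('http://1.2.3.4/cd', 'fake-node-uuid')
except errors.DeploymentError as exc:
    # Being a RESTError, the exception serializes directly into the
    # agent's REST API error responses; no ironic-lib import needed.
    print(exc)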
@ -13,20 +13,20 @@
import os
from unittest import mock

from ironic_lib.tests import base
from ironic_lib import utils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import imageutils

from ironic_python_agent import errors
from ironic_python_agent import qemu_img
from ironic_python_agent.tests.unit import base
from ironic_python_agent import utils


CONF = cfg.CONF


class ImageInfoTestCase(base.IronicLibTestCase):
class ImageInfoTestCase(base.IronicAgentTest):

    @mock.patch.object(os.path, 'exists', return_value=False, autospec=True)
    def test_image_info_path_doesnt_exist_disabled(self, path_exists_mock):
@ -79,7 +79,7 @@ class ImageInfoTestCase(base.IronicAgentTest):
        image_info_mock.assert_not_called()


class ConvertImageTestCase(base.IronicLibTestCase):
class ConvertImageTestCase(base.IronicAgentTest):

    @mock.patch.object(utils, 'execute', autospec=True)
    def test_convert_image_disabled(self, execute_mock):
@ -12,7 +12,6 @@

from unittest import mock

from ironic_lib import utils as ilib_utils
from oslo_concurrency import processutils

from ironic_python_agent import disk_utils
@ -158,7 +157,7 @@ class TestRaidUtils(base.IronicAgentTest):
    @mock.patch.object(raid_utils, 'get_next_free_raid_device', autospec=True,
                       return_value='/dev/md42')
    @mock.patch.object(hardware, 'dispatch_to_managers', autospec=True)
    @mock.patch.object(ilib_utils, 'execute', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    @mock.patch.object(disk_utils, 'find_efi_partition', autospec=True)
    def test_prepare_boot_partitions_for_softraid_uefi_gpt(
            self, mock_efi_part, mock_execute, mock_dispatch,
@ -216,9 +215,9 @@ class TestRaidUtils(base.IronicAgentTest):
    @mock.patch.object(raid_utils, 'get_next_free_raid_device', autospec=True,
                       return_value='/dev/md42')
    @mock.patch.object(hardware, 'dispatch_to_managers', autospec=True)
    @mock.patch.object(ilib_utils, 'execute', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    @mock.patch.object(disk_utils, 'find_efi_partition', autospec=True)
    @mock.patch.object(ilib_utils, 'mkfs', autospec=True)
    @mock.patch.object(utils, 'mkfs', autospec=True)
    def test_prepare_boot_partitions_for_softraid_uefi_gpt_esp_not_found(
            self, mock_mkfs, mock_efi_part, mock_execute, mock_dispatch,
            mock_free_raid_device, mock_rescan, mock_find_esp):
@ -271,7 +270,7 @@ class TestRaidUtils(base.IronicAgentTest):
    @mock.patch.object(raid_utils, 'get_next_free_raid_device', autospec=True,
                       return_value='/dev/md42')
    @mock.patch.object(hardware, 'dispatch_to_managers', autospec=True)
    @mock.patch.object(ilib_utils, 'execute', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    def test_prepare_boot_partitions_for_softraid_uefi_gpt_efi_provided(
            self, mock_execute, mock_dispatch, mock_free_raid_device,
            mock_rescan, mock_find_esp):
@ -321,7 +320,7 @@ class TestRaidUtils(base.IronicAgentTest):
        self.assertEqual(efi_part, '/dev/md42')

    @mock.patch.object(hardware, 'dispatch_to_managers', autospec=True)
    @mock.patch.object(ilib_utils, 'execute', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    @mock.patch.object(disk_utils, 'get_partition_table_type', autospec=True,
                       return_value='msdos')
    def test_prepare_boot_partitions_for_softraid_bios_msdos(
@ -339,7 +338,7 @@ class TestRaidUtils(base.IronicAgentTest):
        self.assertIsNone(efi_part)

    @mock.patch.object(hardware, 'dispatch_to_managers', autospec=True)
    @mock.patch.object(ilib_utils, 'execute', autospec=True)
    @mock.patch.object(utils, 'execute', autospec=True)
    @mock.patch.object(disk_utils, 'get_partition_table_type', autospec=True,
                       return_value='gpt')
    def test_prepare_boot_partitions_for_softraid_bios_gpt(
@ -25,35 +25,20 @@ import tempfile
import time
from unittest import mock

from ironic_lib import utils as ironic_utils
from oslo_concurrency import processutils
import requests
import testtools

from ironic_python_agent import errors
from ironic_python_agent import hardware
from ironic_python_agent.tests.unit import base as ironic_agent_base
from ironic_python_agent.tests.unit import base
from ironic_python_agent import utils


class ExecuteTestCase(ironic_agent_base.IronicAgentTest):
    # This test case does call utils.execute(), so don't block access to the
    # execute calls.
    block_execute = False

    # We do mock out the call to ironic_utils.execute() so we don't actually
    # 'execute' anything, as utils.execute() calls ironic_utils.execute()
    @mock.patch.object(ironic_utils, 'execute', autospec=True)
    def test_execute(self, mock_execute):
        utils.execute('/usr/bin/env', 'false', check_exit_code=False)
        mock_execute.assert_called_once_with('/usr/bin/env', 'false',
                                             check_exit_code=False)


@mock.patch('shutil.rmtree', autospec=True)
@mock.patch.object(ironic_utils, 'execute', autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
@mock.patch('tempfile.mkdtemp', autospec=True)
class MountedTestCase(ironic_agent_base.IronicAgentTest):
class MountedTestCase(base.IronicAgentTest):

    def test_temporary(self, mock_temp, mock_execute, mock_rmtree):
        with utils.mounted('/dev/fake') as path:
@ -123,7 +108,7 @@ class MountedTestCase(base.IronicAgentTest):
        self.assertFalse(mock_rmtree.called)


class GetAgentParamsTestCase(ironic_agent_base.IronicAgentTest):
class GetAgentParamsTestCase(base.IronicAgentTest):

    @mock.patch('oslo_log.log.getLogger', autospec=True)
    @mock.patch('builtins.open', autospec=True)
@ -352,7 +337,7 @@ class TestFailures(testtools.TestCase):
        self.assertRaisesRegex(FakeException, 'foo', f.raise_if_needed)


class TestUtils(ironic_agent_base.IronicAgentTest):
class TestUtils(base.IronicAgentTest):

    def _get_journalctl_output(self, mock_execute, lines=None, units=None):
        contents = b'Krusty Krab'
@ -870,7 +855,7 @@ class TestRemoveKeys(testtools.TestCase):


@mock.patch.object(utils, 'execute', autospec=True)
class TestClockSyncUtils(ironic_agent_base.IronicAgentTest):
class TestClockSyncUtils(base.IronicAgentTest):

    def test_determine_time_method_none(self, mock_execute):
        mock_execute.side_effect = OSError
@ -1113,7 +1098,7 @@ class TestCopyConfigFromVmedia(testtools.TestCase):


@mock.patch.object(requests, 'get', autospec=True)
class TestStreamingClient(ironic_agent_base.IronicAgentTest):
class TestStreamingClient(base.IronicAgentTest):

    def test_ok(self, mock_get):
        client = utils.StreamingClient()
@ -1142,7 +1127,7 @@ class TestStreamingClient(base.IronicAgentTest):
        self.assertEqual(2, mock_get.call_count)


class TestCheckVirtualMedia(ironic_agent_base.IronicAgentTest):
class TestCheckVirtualMedia(base.IronicAgentTest):

    @mock.patch.object(utils, 'execute', autospec=True)
    def test_check_vmedia_device(self, mock_execute):
@ -1209,7 +1194,7 @@ class TestCheckVirtualMedia(base.IronicAgentTest):
                          '/dev/sdh')


class TestCheckEarlyLogging(ironic_agent_base.IronicAgentTest):
class TestCheckEarlyLogging(base.IronicAgentTest):

    @mock.patch.object(utils, 'LOG', autospec=True)
    def test_early_logging_goes_to_logger(self, mock_log):
@ -1232,7 +1217,7 @@ class TestCheckEarlyLogging(base.IronicAgentTest):
        info.assert_has_calls(expected_calls)


class TestUnmountOfConfig(ironic_agent_base.IronicAgentTest):
class TestUnmountOfConfig(base.IronicAgentTest):

    @mock.patch.object(utils, '_early_log', autospec=True)
    @mock.patch.object(os.path, 'ismount', autospec=True)
@ -1247,3 +1232,188 @@ class TestUnmountOfConfig(base.IronicAgentTest):
        mock_exec.assert_has_calls([
            mock.call('umount', '/mnt/config'),
            mock.call('umount', '/mnt/config')])


class BareMetalUtilsTestCase(base.IronicAgentTest):

    def test_unlink(self):
        with mock.patch.object(os, "unlink", autospec=True) as unlink_mock:
            unlink_mock.return_value = None
            utils.unlink_without_raise("/fake/path")
            unlink_mock.assert_called_once_with("/fake/path")

    def test_unlink_ENOENT(self):
        with mock.patch.object(os, "unlink", autospec=True) as unlink_mock:
            unlink_mock.side_effect = OSError(errno.ENOENT)
            utils.unlink_without_raise("/fake/path")
            unlink_mock.assert_called_once_with("/fake/path")


class ExecuteTestCase(base.IronicAgentTest):
    # Allow calls to utils.execute() and related functions
    block_execute = False

    @mock.patch.object(processutils, 'execute', autospec=True)
    @mock.patch.object(os.environ, 'copy', return_value={}, autospec=True)
    def test_execute_use_standard_locale_no_env_variables(self, env_mock,
                                                          execute_mock):
        utils.execute('foo', use_standard_locale=True)
        execute_mock.assert_called_once_with('foo',
                                             env_variables={'LC_ALL': 'C'})

    @mock.patch.object(processutils, 'execute', autospec=True)
    def test_execute_use_standard_locale_with_env_variables(self,
                                                            execute_mock):
        utils.execute('foo', use_standard_locale=True,
                      env_variables={'foo': 'bar'})
        execute_mock.assert_called_once_with('foo',
                                             env_variables={'LC_ALL': 'C',
                                                            'foo': 'bar'})

    @mock.patch.object(processutils, 'execute', autospec=True)
    def test_execute_not_use_standard_locale(self, execute_mock):
        utils.execute('foo', use_standard_locale=False,
                      env_variables={'foo': 'bar'})
        execute_mock.assert_called_once_with('foo',
                                             env_variables={'foo': 'bar'})

    @mock.patch.object(utils, 'LOG', autospec=True)
    def _test_execute_with_log_stdout(self, log_mock, log_stdout=None):
        with mock.patch.object(
                processutils, 'execute', autospec=True) as execute_mock:
            execute_mock.return_value = ('stdout', 'stderr')
            if log_stdout is not None:
                utils.execute('foo', log_stdout=log_stdout)
            else:
                utils.execute('foo')
            execute_mock.assert_called_once_with('foo')
            name, args, kwargs = log_mock.debug.mock_calls[0]
            if log_stdout is False:
                self.assertEqual(1, log_mock.debug.call_count)
                self.assertNotIn('stdout', args[0])
            else:
                self.assertEqual(2, log_mock.debug.call_count)
                self.assertIn('stdout', args[0])

    def test_execute_with_log_stdout_default(self):
        self._test_execute_with_log_stdout()

    def test_execute_with_log_stdout_true(self):
        self._test_execute_with_log_stdout(log_stdout=True)

    def test_execute_with_log_stdout_false(self):
        self._test_execute_with_log_stdout(log_stdout=False)

    @mock.patch.object(utils, 'LOG', autospec=True)
    @mock.patch.object(processutils, 'execute', autospec=True)
    def test_execute_command_not_found(self, execute_mock, log_mock):
        execute_mock.side_effect = FileNotFoundError
        self.assertRaises(FileNotFoundError, utils.execute, 'foo')
        execute_mock.assert_called_once_with('foo')
        name, args, kwargs = log_mock.debug.mock_calls[0]
        self.assertEqual(1, log_mock.debug.call_count)
        self.assertIn('not found', args[0])


class MkfsTestCase(base.IronicAgentTest):

    @mock.patch.object(utils, 'execute', autospec=True)
    def test_mkfs(self, execute_mock):
        utils.mkfs('ext4', '/my/block/dev')
        utils.mkfs('msdos', '/my/msdos/block/dev')
        utils.mkfs('swap', '/my/swap/block/dev')

        expected = [mock.call('mkfs', '-t', 'ext4', '-F', '/my/block/dev',
                              use_standard_locale=True),
                    mock.call('mkfs', '-t', 'msdos', '/my/msdos/block/dev',
                              use_standard_locale=True),
                    mock.call('mkswap', '/my/swap/block/dev',
                              use_standard_locale=True)]
        self.assertEqual(expected, execute_mock.call_args_list)

    @mock.patch.object(utils, 'execute', autospec=True)
    def test_mkfs_with_label(self, execute_mock):
        utils.mkfs('ext4', '/my/block/dev', 'ext4-vol')
        utils.mkfs('msdos', '/my/msdos/block/dev', 'msdos-vol')
        utils.mkfs('swap', '/my/swap/block/dev', 'swap-vol')

        expected = [mock.call('mkfs', '-t', 'ext4', '-F', '-L', 'ext4-vol',
                              '/my/block/dev',
                              use_standard_locale=True),
                    mock.call('mkfs', '-t', 'msdos', '-n', 'msdos-vol',
                              '/my/msdos/block/dev',
                              use_standard_locale=True),
                    mock.call('mkswap', '-L', 'swap-vol',
                              '/my/swap/block/dev',
                              use_standard_locale=True)]
        self.assertEqual(expected, execute_mock.call_args_list)

    @mock.patch.object(utils, 'execute', autospec=True,
                       side_effect=processutils.ProcessExecutionError(
                           stderr=os.strerror(errno.ENOENT)))
    def test_mkfs_with_unsupported_fs(self, execute_mock):
        self.assertRaises(errors.FileSystemNotSupported,
                          utils.mkfs, 'foo', '/my/block/dev')

    @mock.patch.object(utils, 'execute', autospec=True,
                       side_effect=processutils.ProcessExecutionError(
                           stderr='fake'))
    def test_mkfs_with_unexpected_error(self, execute_mock):
        self.assertRaises(processutils.ProcessExecutionError, utils.mkfs,
                          'ext4', '/my/block/dev', 'ext4-vol')


@mock.patch.object(utils, 'execute', autospec=True)
class GetRouteSourceTestCase(base.IronicAgentTest):

    def test_get_route_source_ipv4(self, mock_execute):
        mock_execute.return_value = ('XXX src 1.2.3.4 XXX\n    cache', None)

        source = utils.get_route_source('XXX')
        self.assertEqual('1.2.3.4', source)

    def test_get_route_source_ipv6(self, mock_execute):
        mock_execute.return_value = ('XXX src 1:2::3:4 metric XXX\n    cache',
                                     None)

        source = utils.get_route_source('XXX')
        self.assertEqual('1:2::3:4', source)

    def test_get_route_source_ipv6_linklocal(self, mock_execute):
        mock_execute.return_value = (
            'XXX src fe80::1234:1234:1234:1234 metric XXX\n    cache', None)

        source = utils.get_route_source('XXX')
        self.assertIsNone(source)

    def test_get_route_source_ipv6_linklocal_allowed(self, mock_execute):
        mock_execute.return_value = (
            'XXX src fe80::1234:1234:1234:1234 metric XXX\n    cache', None)

        source = utils.get_route_source('XXX', ignore_link_local=False)
        self.assertEqual('fe80::1234:1234:1234:1234', source)

    def test_get_route_source_indexerror(self, mock_execute):
        mock_execute.return_value = ('XXX src \n    cache', None)

        source = utils.get_route_source('XXX')
        self.assertIsNone(source)


class ParseDeviceTagsTestCase(base.IronicAgentTest):

    def test_empty(self):
        result = utils.parse_device_tags("\n\n")
        self.assertEqual([], list(result))

    def test_parse(self):
        tags = """
PTUUID="00016a50" PTTYPE="dos" LABEL=""
TYPE="vfat" PART_ENTRY_SCHEME="gpt" PART_ENTRY_NAME="EFI System Partition"
"""
        result = list(utils.parse_device_tags(tags))
        self.assertEqual([
            {'PTUUID': '00016a50', 'PTTYPE': 'dos', 'LABEL': ''},
            {'TYPE': 'vfat', 'PART_ENTRY_SCHEME': 'gpt',
             'PART_ENTRY_NAME': 'EFI System Partition'}
        ], result)
@ -19,9 +19,11 @@ import copy
import errno
import glob
import io
import ipaddress
import json
import os
import re
import shlex
import shutil
import stat
import subprocess
@ -29,11 +31,12 @@ import sys
import tarfile
import tempfile
import time
import warnings

from ironic_lib import utils as ironic_utils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import requests
import tenacity
@ -71,12 +74,135 @@ DEVICE_EXTRACTOR = re.compile(r'^(?:(.*\d)p|(.*\D))(?:\d+)$')
_EARLY_LOG_BUFFER = []


def execute(*cmd, **kwargs):
    """Convenience wrapper around ironic_lib's execute() method.
def execute(*cmd, use_standard_locale=False, log_stdout=True, **kwargs):
    """Convenience wrapper around oslo's execute() method.

    Executes and logs results from a system command.
    Executes and logs results from a system command. See docs for
    oslo_concurrency.processutils.execute for usage.

    :param cmd: positional arguments to pass to processutils.execute()
    :param use_standard_locale: Defaults to False. If set to True,
                                execute command with standard locale
                                added to environment variables.
    :param log_stdout: Defaults to True. If set to True, logs the output.
    :param kwargs: keyword arguments to pass to processutils.execute()
    :returns: (stdout, stderr) from process execution
    :raises: UnknownArgumentError on receiving unknown arguments
    :raises: ProcessExecutionError
    :raises: OSError
    """
    return ironic_utils.execute(*cmd, **kwargs)
    if use_standard_locale:
        env = kwargs.pop('env_variables', os.environ.copy())
        env['LC_ALL'] = 'C'
        kwargs['env_variables'] = env

    if kwargs.pop('run_as_root', False):
        warnings.warn("run_as_root is deprecated and has no effect",
                      DeprecationWarning)

    def _log(stdout, stderr):
        if log_stdout:
            try:
                LOG.debug('Command stdout is: "%s"', stdout)
            except UnicodeEncodeError:
                LOG.debug('stdout contains invalid UTF-8 characters')
                stdout = (stdout.encode('utf8', 'surrogateescape')
                          .decode('utf8', 'ignore'))
                LOG.debug('Command stdout is: "%s"', stdout)
        try:
            LOG.debug('Command stderr is: "%s"', stderr)
        except UnicodeEncodeError:
            LOG.debug('stderr contains invalid UTF-8 characters')
            stderr = (stderr.encode('utf8', 'surrogateescape')
                      .decode('utf8', 'ignore'))
            LOG.debug('Command stderr is: "%s"', stderr)

    try:
        result = processutils.execute(*cmd, **kwargs)
    except FileNotFoundError:
        with excutils.save_and_reraise_exception():
            LOG.debug('Command not found: "%s"', ' '.join(map(str, cmd)))
    except processutils.ProcessExecutionError as exc:
        with excutils.save_and_reraise_exception():
            _log(exc.stdout, exc.stderr)
    else:
        _log(result[0], result[1])
        return result
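A hedged usage sketch of the rewritten wrapper, mirroring the new ExecuteTestCase above (assuming this module is imported as ironic_python_agent.utils; the lsblk invocation is illustrative only):

from ironic_python_agent import utils

# use_standard_locale forces LC_ALL=C so output parsing stays stable across
# locales; log_stdout=False suppresses the stdout debug log for noisy commands.
stdout, stderr = utils.execute('lsblk', '-o', 'KNAME,LABEL',
                               use_standard_locale=True,
                               log_stdout=False)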
def mkfs(fs, path, label=None):
    """Format a file or block device

    :param fs: Filesystem type (examples include 'swap', 'ext3', 'ext4'
               'btrfs', etc.)
    :param path: Path to file or block device to format
    :param label: Volume label to use
    """
    if fs == 'swap':
        args = ['mkswap']
    else:
        args = ['mkfs', '-t', fs]
    # add -F to force no interactive execute on non-block device.
    if fs in ('ext3', 'ext4'):
        args.extend(['-F'])
    if label:
        if fs in ('msdos', 'vfat'):
            label_opt = '-n'
        else:
            label_opt = '-L'
        args.extend([label_opt, label])
    args.append(path)
    try:
        execute(*args, use_standard_locale=True)
    except processutils.ProcessExecutionError as e:
        with excutils.save_and_reraise_exception() as ctx:
            if os.strerror(errno.ENOENT) in e.stderr:
                ctx.reraise = False
                LOG.exception('Failed to make file system. '
                              'File system %s is not supported.', fs)
                raise errors.FileSystemNotSupported(fs=fs)
            else:
                LOG.exception('Failed to create a file system '
                              'in %(path)s. Error: %(error)s',
                              {'path': path, 'error': e})
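A short, hedged sketch of the helper in use ('/dev/sda15' is a hypothetical config drive partition, not a path from this change):

from ironic_python_agent import errors
from ironic_python_agent import utils

try:
    utils.mkfs('vfat', '/dev/sda15', label='CONFIG-2')
except errors.FileSystemNotSupported:
    # Raised when mkfs reports the ENOENT "not found" case for the
    # requested filesystem type, e.g. the tool is not installed.
    raise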
def try_execute(*cmd, **kwargs):
    """The same as execute but returns None on error.

    Executes and logs results from a system command. See docs for
    oslo_concurrency.processutils.execute for usage.

    Instead of raising an exception on failure, this method simply
    returns None in case of failure.

    :param cmd: positional arguments to pass to processutils.execute()
    :param kwargs: keyword arguments to pass to processutils.execute()
    :raises: UnknownArgumentError on receiving unknown arguments
    :returns: tuple of (stdout, stderr) or None in some error cases
    """
    try:
        return execute(*cmd, **kwargs)
    except (processutils.ProcessExecutionError, OSError) as e:
        LOG.debug('Command failed: %s', e)
def parse_device_tags(output):
    """Parse tags from the lsblk/blkid output.

    Parses format KEY="VALUE" KEY2="VALUE2".

    :return: a generator yielding dicts with information from each line.
    """
    for line in output.strip().split('\n'):
        if line.strip():
            try:
                yield {key: value for key, value in
                       (v.split('=', 1) for v in shlex.split(line))}
            except ValueError as err:
                raise ValueError(
                    _("Malformed blkid/lsblk output line '%(line)s': %(err)s")
                    % {'line': line, 'err': err})
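A quick sketch of the generator in use (the one-line input is shortened from the ParseDeviceTagsTestCase above):

from ironic_python_agent import utils

# Each lsblk/blkid line becomes one dict mapping KEY -> VALUE.
for tags in utils.parse_device_tags('KNAME="sda1" LABEL="config-2"\n'):
    print(tags['KNAME'], tags['LABEL'])  # -> sda1 config-2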
@contextlib.contextmanager
@ -108,15 +234,15 @@ def mounted(source, dest=None, opts=None, fs_type=None,

    mounted = False
    try:
        ironic_utils.execute("mount", source, dest, *params,
                             attempts=mount_attempts, delay_on_retry=True)
        execute("mount", source, dest, *params,
                attempts=mount_attempts, delay_on_retry=True)
        mounted = True
        yield dest
    finally:
        if mounted:
            try:
                ironic_utils.execute("umount", dest, attempts=umount_attempts,
                                     delay_on_retry=True)
                execute("umount", dest, attempts=umount_attempts,
                        delay_on_retry=True)
            except (EnvironmentError,
                    processutils.ProcessExecutionError) as exc:
                LOG.warning(
@ -134,6 +260,16 @@ def mounted(source, dest=None, opts=None, fs_type=None,
                    {'dest': dest, 'err': exc})


def unlink_without_raise(path):
    try:
        os.unlink(path)
    except OSError as e:
        if e.errno == errno.ENOENT:
            return
        else:
            LOG.warning(f"Failed to unlink {path}, error: {e}")
def _read_params_from_file(filepath):
    """Extract key=value pairs from a file.

@ -181,7 +317,7 @@ def _find_vmedia_device_by_labels(labels):
        _early_log('Was unable to execute the lsblk command. %s', e)
        return

    for device in ironic_utils.parse_device_tags(lsblk_output):
    for device in parse_device_tags(lsblk_output):
        for label in labels:
            if label.upper() == device['LABEL'].upper():
                candidates.append(device['KNAME'])
@ -299,7 +435,7 @@ def _check_vmedia_device(vmedia_device_file):
                   'virtual media identification. %s', e)
        return False
    try:
        for device in ironic_utils.parse_device_tags(output):
        for device in parse_device_tags(output):
            if device['TYPE'] == 'part':
                _early_log('Excluding device %s from virtual media'
                           'consideration as it is a partition.',
@ -1040,3 +1176,25 @@ def is_char_device(path):
        # Likely because of insufficient permission,
        # race conditions or I/O related errors.
        return False


def get_route_source(dest, ignore_link_local=True):
    """Get the IP address to send packages to destination."""
    try:
        out, _err = execute('ip', 'route', 'get', dest)
    except (EnvironmentError, processutils.ProcessExecutionError) as e:
        LOG.warning('Cannot get route to host %(dest)s: %(err)s',
                    {'dest': dest, 'err': e})
        return

    try:
        source = out.strip().split('\n')[0].split('src')[1].split()[0]
        if (ipaddress.ip_address(source).is_link_local
                and ignore_link_local):
            LOG.debug('Ignoring link-local source to %(dest)s: %(rec)s',
                      {'dest': dest, 'rec': out})
            return
        return source
    except (IndexError, ValueError):
        LOG.debug('No route to host %(dest)s, route record: %(rec)s',
                  {'dest': dest, 'rec': out})
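And a hedged sketch of get_route_source, which the new mdns module relies on (per the mocks in test_mdns.py) to pick a reachable advertised address; '203.0.113.1' is a documentation address used as a hypothetical destination:

from ironic_python_agent import utils

# Returns the local source IP that 'ip route get' reports for dest, or None.
src = utils.get_route_source('203.0.113.1')
if src is None:
    # Either no route exists or only a link-local source was found;
    # pass ignore_link_local=False to accept fe80:: sources.
    pass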
@ -11,7 +11,7 @@ pyudev>=0.18 # LGPLv2.1+
requests>=2.14.2 # Apache-2.0
stevedore>=1.20.0 # Apache-2.0
tenacity>=6.2.0 # Apache-2.0
ironic-lib>=6.0.0 # Apache-2.0
Werkzeug>=2.0.0 # BSD License
cryptography>=2.3 # BSD/Apache-2.0
tooz>=2.7.2 # Apache-2.0
zeroconf>=0.24.0 # LGPL