Software RAID: Create/delete configurations

This patch extends the IPA so that it can configure software RAID
devices. To this end, the {create,delete}_configuration methods of
the GenericHardwareManager are implemented; an example
target_raid_config is shown below.

Change-Id: Id20302537f7994982c7584af546a7e7520e9612b
Story: #2004581
Task: #29101
Arne Wiebalck 2019-02-04 13:17:23 +01:00
parent 1684ad707c
commit 2db123d318
4 changed files with 755 additions and 0 deletions
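
For reference, a target_raid_config accepted by this patch, written as a
Python dict in the same shape the unit tests below use (the sizes are
illustrative): a 100 GB RAID-1 plus a RAID-0 spanning the remaining space.

    raid_config = {
        "logical_disks": [
            {"size_gb": "100", "raid_level": "1", "controller": "software"},
            {"size_gb": "MAX", "raid_level": "0", "controller": "software"},
        ]
    }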


@@ -204,6 +204,15 @@ class BlockDeviceError(RESTError):
super(BlockDeviceError, self).__init__(details)
class SoftwareRAIDError(RESTError):
"""Error raised when a Software RAID causes an error."""
message = 'Software RAID caused unknown error'
def __init__(self, details):
super(SoftwareRAIDError, self).__init__(details)
class VirtualMediaBootError(RESTError):
"""Error raised when virtual media device cannot be found for config."""


@@ -18,6 +18,7 @@ import functools
import json
from multiprocessing.pool import ThreadPool
import os
import re
import shlex
import time
@@ -112,6 +113,104 @@ def _check_for_iscsi():
"Error: %s", e)
def _get_component_devices(raid_device):
"""Get the component devices of a Software RAID device.
Examine an md device and return its constituent devices.
:param raid_device: A Software RAID block device name.
:returns: A list of the component devices.
"""
if not raid_device:
return []
component_devices = []
try:
out, _ = utils.execute('mdadm', '--detail', raid_device,
use_standard_locale=True)
except processutils.ProcessExecutionError as e:
msg = ('Could not get component devices of %(dev)s: %(err)s' %
{'dev': raid_device, 'err': e})
raise errors.SoftwareRAIDError(msg)
lines = out.splitlines()
for line in lines:
if 'active sync' not in line:
continue
device = re.findall(r'/dev/\w+', line)
component_devices += device
return component_devices
def _get_holder_disks(raid_device):
"""Get the holder disks of a Software RAID device.
Examine an md device and return its underlying disks.
:param raid_device: A Software RAID block device name.
:returns: A list of the holder disks.
"""
if not raid_device:
return []
holder_disks = []
try:
out, _ = utils.execute('mdadm', '--detail', raid_device,
use_standard_locale=True)
except processutils.ProcessExecutionError as e:
msg = ('Could not get holder disks of %(dev)s: %(err)s' %
{'dev': raid_device, 'err': e})
raise errors.SoftwareRAIDError(msg)
lines = out.splitlines()
for line in lines:
if 'active sync' not in line:
continue
# \D+ drops the trailing partition number, leaving the disk itself.
device = re.findall(r'/dev/\D+', line)
holder_disks += device
return holder_disks
def _is_md_device(raid_device):
"""Check if a device is an md device
Check if a device is a Software RAID (md) device.
:param raid_device: A Software RAID block device name.
:returns: True if the device is an md device, False otherwise.
"""
try:
utils.execute("mdadm --detail {}".format(raid_device))
LOG.debug("%s is an md device", raid_device)
return True
except processutils.ProcessExecutionError:
LOG.debug("%s is not an md device", raid_device)
return False
def _md_restart(raid_device):
"""Restart an md device
Stop and re-assemble a Software RAID (md) device.
:param raid_device: A Software RAID block device name.
:raises: CommandExecutionError in case the restart fails.
"""
try:
component_devices = _get_component_devices(raid_device)
cmd = "mdadm --stop {}".format(raid_device)
utils.execute(cmd)
utils.execute("mdadm --assemble {} {}".format(
raid_device, ' '.join(component_devices)))
except processutils.ProcessExecutionError as e:
error_msg = ('Could not restart md device %(dev)s: %(err)s' %
{'dev': raid_device, 'err': e})
LOG.error(error_msg)
raise errors.CommandExecutionError(error_msg)
def list_all_block_devices(block_type='disk',
ignore_raid=False):
"""List all physical block devices
@@ -1151,9 +1250,285 @@ class GenericHardwareManager(HardwareManager):
'interface': 'deploy',
'reboot_requested': False,
'abortable': True
},
{
'step': 'delete_configuration',
'priority': 0,
'interface': 'raid',
'reboot_requested': False,
'abortable': True
},
{
'step': 'create_configuration',
'priority': 0,
'interface': 'raid',
'reboot_requested': False,
'abortable': True
}
]
def create_configuration(self, node, ports):
"""Create a RAID configuration.
Unless overridden by a local hardware manager, this method
will create a software RAID configuration as read from the
node's 'target_raid_config'.
:param node: A dictionary of the node object.
:param ports: A list of dictionaries containing information
of ports for the node.
:returns: The current RAID configuration in the usual format.
:raises: SoftwareRAIDError if the desired configuration is not
valid or if there was an error when creating the RAID
devices.
"""
LOG.info("Creating Software RAID")
raid_config = node.get('target_raid_config', {})
# No 'software' controller: do nothing. If 'controller' is
# set to 'software' on only one of the drives, the validation
# code will catch it.
software_raid = False
logical_disks = raid_config.get('logical_disks', [])
for logical_disk in logical_disks:
if logical_disk.get('controller') == 'software':
software_raid = True
break
if not software_raid:
LOG.debug("No Software RAID config found")
return {}
LOG.info("Creating Software RAID")
# Check if the config is compliant with current limitations.
self.validate_configuration(raid_config, node)
# Log the validated target_raid_configuration.
LOG.debug("Target Software RAID configuration: %s", raid_config)
# Make sure there are no partitions yet (or left behind).
block_devices = self.list_block_devices()
block_devices_partitions = self.list_block_devices(
include_partitions=True)
if len(block_devices) != len(block_devices_partitions):
partitions = ' '.join(
partition.name for partition in block_devices_partitions)
msg = "Partitions detected during RAID config: {}". format(
partitions)
raise errors.SoftwareRAIDError(msg)
# Create an MBR partition table on each disk.
# TODO(arne_wiebalck): Check if GPT would work as well.
for block_device in block_devices:
LOG.info("Creating partition table on {}".format(
block_device.name))
try:
utils.execute('parted', block_device.name, '-s', '--',
'mklabel', 'msdos')
except processutils.ProcessExecutionError as e:
msg = "Failed to create partition table on {}: {}".format(
block_device.name, e)
raise errors.SoftwareRAIDError(msg)
# Create the partitions which will become the component devices.
logical_disks = raid_config.get('logical_disks')
sector = '2048s'
for logical_disk in logical_disks:
psize = logical_disk['size_gb']
if psize == 'MAX':
# Let the partition extend to the end of the device.
psize = '-1'
else:
psize = int(psize) * 1024
for device in block_devices:
try:
LOG.debug("Creating partition on {}: {} {}".format(
device.name, sector, psize))
utils.execute('parted', device.name, '-s', '-a',
'optimal', '--', 'mkpart', 'primary',
sector, psize)
except processutils.ProcessExecutionError as e:
msg = "Failed to create partitions on {}: {}".format(
device.name, e)
raise errors.SoftwareRAIDError(msg)
sector = psize
# Create the RAID devices.
raid_device_count = len(block_devices)
for index, logical_disk in enumerate(logical_disks):
md_device = '/dev/md%d' % index
component_devices = ' '.join(
device.name + str(index + 1) for device in block_devices)
raid_level = logical_disk['raid_level']
# The schema check allows '1+0', but mdadm knows it as '10'.
if raid_level == '1+0':
raid_level = '10'
try:
LOG.debug("Creating md device {} on {}".format(
md_device, component_devices))
cmd = ("mdadm --create {} --level={} --raid-devices={} {} "
"--force --run --metadata=1").format(
md_device, raid_level, raid_device_count,
component_devices)
utils.execute(cmd)
except processutils.ProcessExecutionError as e:
msg = "Failed to create md device {} on {}: {}".format(
md_device, component_devices, e)
raise errors.SoftwareRAIDError(msg)
LOG.info("Successfully created Software RAID")
return raid_config
def delete_configuration(self, node, ports):
"""Delete a RAID configuration.
Unless overridden by a local hardware manager, this method
will delete all software RAID devices on the node.
NOTE(arne_wiebalck): It may be worth considering deleting
only the RAID devices in the node's 'target_raid_config'. If
that config has been lost, though, the cleanup may become
difficult. So, for now, we delete everything we detect.
:param node: A dictionary of the node object
:param ports: A list of dictionaries containing information
of ports for the node
"""
raid_devices = list_all_block_devices(block_type='raid',
ignore_raid=False)
for raid_device in raid_devices:
LOG.info("Deleting Software RAID device {}".format(
raid_device.name))
component_devices = _get_component_devices(raid_device.name)
LOG.debug("Found component devices {}".format(
component_devices))
holder_disks = _get_holder_disks(raid_device.name)
LOG.debug("Found holder disks {}".format(
holder_disks))
# Remove md devices.
try:
utils.execute('wipefs', '-af', raid_device.name)
except processutils.ProcessExecutionError as e:
msg = "Failed to wipefs {}: {}".format(
raid_device.name, e)
LOG.warning(msg)
try:
utils.execute('mdadm', '--stop', raid_device.name)
except processutils.ProcessExecutionError as e:
msg = "Failed to stop {}: {}".format(
raid_device.name, e)
LOG.warning(msg)
# Remove md metadata from component devices.
for component_device in component_devices:
try:
utils.execute('mdadm', '--examine',
component_device)
except processutils.ProcessExecutionError as e:
if "No md superblock detected" in str(e):
# actually not a component device
continue
else:
msg = "Failed to examine device {}: {}".format(
component_device, e)
raise errors.SoftwareRAIDError(msg)
LOG.debug("Deleting md superblock on {}".format(
component_device))
try:
utils.execute('mdadm', '--zero-superblock',
component_device)
except processutils.ProcessExecutionError as e:
msg = "Failed to remove superblock from {}: {}".format(
raid_device.name, e)
LOG.warning(msg)
# Remove the partitions we created during create_configuration.
for holder_disk in holder_disks:
LOG.debug("Removing partitions on {}".format(
holder_disk))
try:
utils.execute('wipefs', '-af', holder_disk)
except processutils.ProcessExecutionError as e:
LOG.warning("Failed to remove partitions on {}: {}".format(
holder_disk, e))
LOG.info("Deleted Software RAID device {}".format(
raid_device.name))
LOG.debug("Finished deleting Software RAID(s)")
def validate_configuration(self, raid_config, node):
"""Validate a (software) RAID configuration
Validate a given raid_config, in particular with respect to
the limitations of the current implementation of software
RAID support.
:param raid_config: The current RAID configuration in the usual format.
:param node: A dictionary of the node object.
:raises: SoftwareRAIDError if the configuration is not valid.
:returns: True if the configuration passed all checks.
"""
LOG.debug("Validating Software RAID config: {}".format(raid_config))
if not raid_config:
LOG.error("No RAID config passed")
return False
logical_disks = raid_config.get('logical_disks')
if not logical_disks:
msg = "RAID config contains no logical disks"
raise errors.SoftwareRAIDError(msg)
raid_errors = []
# Only one or two RAID devices are supported for now.
if len(logical_disks) not in [1, 2]:
msg = ("Software RAID configuration requires one or "
"two logical disks")
raid_errors.append(msg)
# All disks need to be flagged for Software RAID
for logical_disk in logical_disks:
if logical_disk.get('controller') != 'software':
msg = ("Software RAID configuration requires all logical "
"disks to have 'controller'='software'")
raid_errors.append(msg)
# The first RAID device needs to be RAID-1.
if logical_disks[0]['raid_level'] != '1':
msg = ("Software RAID Configuration requires RAID-1 for the "
"first logical disk")
raid_errors.append(msg)
# Additional checks when we have two RAID devices.
if len(logical_disks) == 2:
size1 = logical_disks[0]['size_gb']
size2 = logical_disks[1]['size_gb']
# Only one logical disk is allowed to span the whole device.
if size1 == 'MAX' and size2 == 'MAX':
msg = ("Software RAID can have only one RAID device with "
"size 'MAX'")
raid_errors.append(msg)
# Check the accepted RAID levels.
accepted_levels = ['0', '1', '1+0']
current_level = logical_disks[1]['raid_level']
if current_level not in accepted_levels:
msg = ("Software RAID configuration does not support "
"RAID level %s" % current_level)
raid_errors.append(msg)
if raid_errors:
error = ('Could not validate Software RAID config for %(node)s: '
'%(errors)s') % {'node': node['uuid'],
'errors': '; '.join(raid_errors)}
raise errors.SoftwareRAIDError(error)
return True
def _compare_extensions(ext1, ext2):
mgr1 = ext1.obj
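
Putting the three methods together, a caller inside the agent might drive
them roughly like this (a sketch only: the node dict is reduced to the
fields these methods actually read, the UUID is a placeholder, and ports
are unused here):

    from ironic_python_agent import hardware

    manager = hardware.GenericHardwareManager()
    node = {
        'uuid': '1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
        'target_raid_config': {
            'logical_disks': [
                {'size_gb': '100', 'raid_level': '1',
                 'controller': 'software'},
                {'size_gb': 'MAX', 'raid_level': '0',
                 'controller': 'software'},
            ]
        }
    }
    # Raises SoftwareRAIDError if the layout hits a current limitation.
    manager.validate_configuration(node['target_raid_config'], node)
    # Wipe any existing arrays, then build the new ones.
    manager.delete_configuration(node, ports=[])
    manager.create_configuration(node, ports=[])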


@@ -391,6 +391,35 @@ Copyright (C) 2002-13, Bruce Allen, Christian Franke, www.smartmontools.org
ATA Security is: Unavailable
""") # noqa
MDADM_DETAIL_OUTPUT = ("""
/dev/md0:
Version : 1.0
Creation Time : Fri Feb 15 12:37:44 2019
Raid Level : raid1
Array Size : 1048512 (1023.94 MiB 1073.68 MB)
Used Dev Size : 1048512 (1023.94 MiB 1073.68 MB)
Raid Devices : 2
Total Devices : 2
Persistence : Superblock is persistent
Update Time : Fri Feb 15 12:38:02 2019
State : clean
Active Devices : 2
Working Devices : 2
Failed Devices : 0
Spare Devices : 0
Consistency Policy : resync
Name : abc.xyz.com:0 (local to host abc.xyz.com)
UUID : 83143055:2781ddf5:2c8f44c7:9b45d92e
Events : 17
Number Major Minor RaidDevice State
0 253 64 0 active sync /dev/vde1
1 253 80 1 active sync /dev/vdf1
""")
class FakeHardwareManager(hardware.GenericHardwareManager):
def __init__(self, hardware_support):
@@ -453,6 +482,20 @@ class TestGenericHardwareManager(base.IronicAgentTest):
'interface': 'deploy',
'reboot_requested': False,
'abortable': True
},
{
'step': 'delete_configuration',
'priority': 0,
'interface': 'raid',
'reboot_requested': False,
'abortable': True
},
{
'step': 'create_configuration',
'priority': 0,
'interface': 'raid',
'reboot_requested': False,
'abortable': True
}
]
clean_steps = self.hardware.get_clean_steps(self.node, [])
@@ -2085,6 +2128,325 @@ class TestGenericHardwareManager(base.IronicAgentTest):
mocked_execute.return_value = '', ''
self.assertEqual('0.0.0.0', self.hardware.get_bmc_address())
@mock.patch.object(utils, 'execute', autospec=True)
def test_validate_configuration_no_configuration(self, mocked_execute):
self.assertRaises(errors.SoftwareRAIDError,
self.hardware.validate_configuration,
self.node, [])
@mock.patch.object(utils, 'execute', autospec=True)
def test_create_configuration(self, mocked_execute):
raid_config = {
"logical_disks": [
{
"size_gb": "100",
"raid_level": "1",
"controller": "software",
},
{
"size_gb": "MAX",
"raid_level": "0",
"controller": "software",
},
]
}
self.node['target_raid_config'] = raid_config
device1 = hardware.BlockDevice('/dev/sda', 'sda', 1073741824, True)
device2 = hardware.BlockDevice('/dev/sdb', 'sdb', 1073741824, True)
self.hardware.list_block_devices = mock.Mock()
self.hardware.list_block_devices.return_value = [device1, device2]
result = self.hardware.create_configuration(self.node, [])
cmd_md0 = ("mdadm --create /dev/md0 --level=1 --raid-devices=2 "
"/dev/sda1 /dev/sdb1 --force --run --metadata=1")
cmd_md1 = ("mdadm --create /dev/md1 --level=0 --raid-devices=2 "
"/dev/sda2 /dev/sdb2 --force --run --metadata=1")
mocked_execute.assert_has_calls([
mock.call('parted', '/dev/sda', '-s', '--', 'mklabel', 'msdos'),
mock.call('parted', '/dev/sdb', '-s', '--', 'mklabel', 'msdos'),
mock.call('parted', '/dev/sda', '-s', '-a', 'optimal', '--',
'mkpart', 'primary', '2048s', 102400),
mock.call('parted', '/dev/sdb', '-s', '-a', 'optimal', '--',
'mkpart', 'primary', '2048s', 102400),
mock.call('parted', '/dev/sda', '-s', '-a', 'optimal', '--',
'mkpart', 'primary', 102400, '-1'),
mock.call('parted', '/dev/sdb', '-s', '-a', 'optimal', '--',
'mkpart', 'primary', 102400, '-1'),
mock.call(cmd_md0),
mock.call(cmd_md1)])
self.assertEqual(raid_config, result)
@mock.patch.object(utils, 'execute', autospec=True)
def test_create_configuration_invalid_raid_config(self, mocked_execute):
raid_config = {
"logical_disks": [
{
"size_gb": "MAX",
"raid_level": "1",
"controller": "software",
},
{
"size_gb": "MAX",
"raid_level": "0",
"controller": "software",
},
]
}
self.node['target_raid_config'] = raid_config
self.assertRaises(errors.SoftwareRAIDError,
self.hardware.create_configuration,
self.node, [])
@mock.patch.object(utils, 'execute', autospec=True)
def test_create_configuration_partitions_detected(self, mocked_execute):
raid_config = {
"logical_disks": [
{
"size_gb": "100",
"raid_level": "1",
"controller": "software",
},
{
"size_gb": "MAX",
"raid_level": "0",
"controller": "software",
},
]
}
self.node['target_raid_config'] = raid_config
device1 = hardware.BlockDevice('/dev/sda', 'sda', 1073741824, True)
device2 = hardware.BlockDevice('/dev/sdb', 'sdb', 1073741824, True)
partition1 = hardware.BlockDevice('/dev/sdb1', 'sdb1', 268435456, True)
self.hardware.list_block_devices = mock.Mock()
self.hardware.list_block_devices.side_effect = [
[device1, device2],
[device1, device2, partition1]]
self.assertRaises(errors.SoftwareRAIDError,
self.hardware.create_configuration,
self.node, [])
@mock.patch.object(utils, 'execute', autospec=True)
def test_create_configuration_device_handling_failures(self,
mocked_execute):
raid_config = {
"logical_disks": [
{
"size_gb": "100",
"raid_level": "1",
"controller": "software",
},
{
"size_gb": "MAX",
"raid_level": "0",
"controller": "software",
},
]
}
self.node['target_raid_config'] = raid_config
device1 = hardware.BlockDevice('/dev/sda', 'sda', 1073741824, True)
device2 = hardware.BlockDevice('/dev/sdb', 'sdb', 1073741824, True)
self.hardware.list_block_devices = mock.Mock()
self.hardware.list_block_devices.side_effect = [
[device1, device2],
[device1, device2],
[device1, device2],
[device1, device2],
[device1, device2],
[device1, device2]]
# partition table creation
error_regex = "Failed to create partition table on /dev/sda"
mocked_execute.side_effect = [
processutils.ProcessExecutionError]
self.assertRaisesRegex(errors.SoftwareRAIDError, error_regex,
self.hardware.create_configuration,
self.node, [])
# partition creation
error_regex = "Failed to create partitions on /dev/sda"
mocked_execute.side_effect = [
None, None, # partition tables on sd{a,b}
processutils.ProcessExecutionError]
self.assertRaisesRegex(errors.SoftwareRAIDError, error_regex,
self.hardware.create_configuration,
self.node, [])
# raid device creation
error_regex = ("Failed to create md device /dev/md0 "
"on /dev/sda1 /dev/sdb1")
mocked_execute.side_effect = [
None, None, # partition tables on sd{a,b}
None, None, # RAID-1 partitions on sd{a,b}
None, None, # RAID-N partitions on sd{a,b}
processutils.ProcessExecutionError]
self.assertRaisesRegex(errors.SoftwareRAIDError, error_regex,
self.hardware.create_configuration,
self.node, [])
@mock.patch.object(utils, 'execute', autospec=True)
def test__get_component_devices(self, mocked_execute):
mocked_execute.side_effect = [(MDADM_DETAIL_OUTPUT, '')]
raid_device = hardware.BlockDevice('/dev/md0', 'RAID-1',
1073741824, True)
component_devices = hardware._get_component_devices(raid_device.name)
self.assertEqual(['/dev/vde1', '/dev/vdf1'], component_devices)
@mock.patch.object(utils, 'execute', autospec=True)
def test__get_holder_disks(self, mocked_execute):
mocked_execute.side_effect = [(MDADM_DETAIL_OUTPUT, '')]
raid_device = hardware.BlockDevice('/dev/md0', 'RAID-1',
1073741824, True)
holder_disks = hardware._get_holder_disks(raid_device.name)
self.assertEqual(['/dev/vde', '/dev/vdf'], holder_disks)
@mock.patch.object(hardware, 'list_all_block_devices', autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test_delete_configuration(self, mocked_execute, mocked_list):
raid_device1 = hardware.BlockDevice('/dev/md0', 'RAID-1',
1073741824, True)
raid_device2 = hardware.BlockDevice('/dev/md1', 'RAID-0',
2147483648, True)
hardware.list_all_block_devices.side_effect = [
[raid_device1, raid_device2]]
hardware._get_component_devices = mock.Mock()
hardware._get_component_devices.side_effect = [
["/dev/sda1", "/dev/sda2"],
["/dev/sdb1", "/dev/sdb2"]]
hardware._get_holder_disks = mock.Mock()
hardware._get_holder_disks.side_effect = [
["/dev/sda", "/dev/sdb"],
["/dev/sda", "/dev/sdb"]]
mocked_execute.side_effect = [
None, None, None,
['_', 'mdadm --examine output for sda1'],
None,
['_', 'mdadm --examine output for sdb1'],
None, None, None,
None, None, None,
['_', 'mdadm --examine output for sda2'],
None,
['_', 'mdadm --examine output for sdb2'],
None, None, None]
self.hardware.delete_configuration(self.node, [])
mocked_execute.assert_has_calls([
mock.call('wipefs', '-af', '/dev/md0'),
mock.call('mdadm', '--stop', '/dev/md0'),
mock.call('mdadm', '--examine', '/dev/sda1'),
mock.call('mdadm', '--zero-superblock', '/dev/sda1'),
mock.call('mdadm', '--examine', '/dev/sda2'),
mock.call('mdadm', '--zero-superblock', '/dev/sda2'),
mock.call('wipefs', '-af', '/dev/sda'),
mock.call('wipefs', '-af', '/dev/sdb'),
mock.call('wipefs', '-af', '/dev/md1'),
mock.call('mdadm', '--stop', '/dev/md1'),
mock.call('mdadm', '--examine', '/dev/sdb1'),
mock.call('mdadm', '--zero-superblock', '/dev/sdb1'),
mock.call('mdadm', '--examine', '/dev/sdb2'),
mock.call('mdadm', '--zero-superblock', '/dev/sdb2'),
mock.call('wipefs', '-af', '/dev/sda'),
mock.call('wipefs', '-af', '/dev/sdb')])
@mock.patch.object(utils, 'execute', autospec=True)
def test_validate_configuration_valid_raid1(self, mocked_execute):
raid_config = {
"logical_disks": [
{
"size_gb": "MAX",
"raid_level": "1",
"controller": "software",
},
]
}
self.assertEqual(True,
self.hardware.validate_configuration(raid_config,
self.node))
@mock.patch.object(utils, 'execute', autospec=True)
def test_validate_configuration_valid_raid1_raidN(self, mocked_execute):
raid_config = {
"logical_disks": [
{
"size_gb": "100",
"raid_level": "1",
"controller": "software",
},
{
"size_gb": "MAX",
"raid_level": "0",
"controller": "software",
},
]
}
self.assertEqual(True,
self.hardware.validate_configuration(raid_config,
self.node))
@mock.patch.object(utils, 'execute', autospec=True)
def test_validate_configuration_invalid_MAX_MAX(self, mocked_execute):
raid_config = {
"logical_disks": [
{
"size_gb": "MAX",
"raid_level": "1",
"controller": "software",
},
{
"size_gb": "MAX",
"raid_level": "0",
"controller": "software",
},
]
}
self.assertRaises(errors.SoftwareRAIDError,
self.hardware.validate_configuration,
raid_config, self.node)
@mock.patch.object(utils, 'execute', autospec=True)
def test_validate_configuration_invalid_raid_level(self, mocked_execute):
raid_config = {
"logical_disks": [
{
"size_gb": "MAX",
"raid_level": "1",
"controller": "software",
},
{
"size_gb": "MAX",
"raid_level": "42",
"controller": "software",
},
]
}
self.assertRaises(errors.SoftwareRAIDError,
self.hardware.validate_configuration,
raid_config, self.node)
@mock.patch.object(utils, 'execute', autospec=True)
def test_validate_configuration_invalid_no_of_raids(self, mocked_execute):
raid_config = {
"logical_disks": [
{
"size_gb": "MAX",
"raid_level": "1",
"controller": "software",
},
{
"size_gb": "MAX",
"raid_level": "0",
"controller": "software",
},
{
"size_gb": "MAX",
"raid_level": "1+0",
"controller": "software",
},
]
}
self.assertRaises(errors.SoftwareRAIDError,
self.hardware.validate_configuration,
raid_config, self.node)
@mock.patch.object(utils, 'execute', autospec=True)
def test_get_system_vendor_info(self, mocked_execute):
mocked_execute.return_value = LSHW_JSON_OUTPUT


@@ -0,0 +1,9 @@
---
features:
- |
Adds support for software RAID via the generic hardware manager. By
means of the node's target_raid_config, a single RAID-1 or one RAID-1
plus one RAID-N can be configured (where N can be 0, 1, or 1+0). The
RAID is created/deleted during manual cleaning. Note that this
initial implementation uses all available devices for the setup
of the software RAID device(s).
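
For context, these steps are intended to be triggered via manual cleaning;
a request using the python-ironicclient CLI might look as follows (a
sketch; <node> is a placeholder for the node name or UUID, and the
target_raid_config is assumed to have been set beforehand):

    openstack baremetal node clean <node> --clean-steps \
        '[{"interface": "raid", "step": "delete_configuration"},
          {"interface": "raid", "step": "create_configuration"}]'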