Merge "Smart root disk selection including support for root device hints"
This commit is contained in:
commit
3cdb38109f
@@ -569,7 +569,7 @@
 # the Nova scheduler. Hook 'validate_interfaces' ensures that valid
 # NIC data was provided by the ramdisk. Do not exclude these two unless
 # you really know what you're doing. (string value)
-#default_processing_hooks = ramdisk_error,scheduler,validate_interfaces
+#default_processing_hooks = ramdisk_error,root_disk_selection,scheduler,validate_interfaces
 
 # Comma-separated list of enabled hooks for processing pipeline. The
 # default for this is $default_processing_hooks, hooks can be added

@@ -603,6 +603,11 @@
 # column of the Ironic database. (string value)
 #store_data_location = <None>
 
+# Whether to leave 1 GiB of disk size untouched for partitioning. Only
+# has effect when used with the IPA as a ramdisk; for older ramdisks
+# local_gb is calculated on the ramdisk side. (boolean value)
+#disk_partitioning_spacing = true
+
 
 [swift]
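For context on the sample above (my note, not part of the diff): the new root_disk_selection hook matches hints stored under the node's properties/root_device against the disk inventory reported by ironic-python-agent (IPA). A node might carry hints like the following sketch; the values are hypothetical, and only the hint names the new code recognizes (model, vendor, serial, wwn, hctl, size) are compared:

    # Hypothetical Ironic node properties carrying root device hints.
    properties = {
        'root_device': {
            'model': 'Small OS Disk',  # compared verbatim against IPA's 'model'
            'size': 20,                # in GiB; IPA reports sizes in bytes
        },
    }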
@@ -131,7 +131,8 @@ PROCESSING_OPTS = [
                     'tested feature, use at your own risk.',
                deprecated_group='discoverd'),
    cfg.StrOpt('default_processing_hooks',
-              default='ramdisk_error,scheduler,validate_interfaces',
+              default='ramdisk_error,root_disk_selection,scheduler,'
+                      'validate_interfaces',
               help='Comma-separated list of default hooks for processing '
                    'pipeline. Hook \'scheduler\' updates the node with the '
                    'minimum properties required by the Nova scheduler. '

@@ -171,6 +172,12 @@ PROCESSING_OPTS = [
               default=None,
               help='Name of the key to store the location of stored data in '
                    'the extra column of the Ironic database.'),
+   cfg.BoolOpt('disk_partitioning_spacing',
+               default=True,
+               help='Whether to leave 1 GiB of disk size untouched for '
+                    'partitioning. Only has effect when used with the IPA '
+                    'as a ramdisk; for older ramdisks local_gb is '
+                    'calculated on the ramdisk side.'),
 ]
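As a quick illustration (a sketch under my own assumptions, not code from this commit) of how the new boolean behaves through the oslo.config API ironic-inspector uses; the 'processing' group matches where PROCESSING_OPTS are registered:

    from oslo_config import cfg

    CONF = cfg.CONF
    CONF.register_opts([cfg.BoolOpt('disk_partitioning_spacing', default=True)],
                       group='processing')
    CONF([])  # parse an empty command line so options become readable

    assert CONF.processing.disk_partitioning_spacing  # defaults to True
    # The unit tests below flip it the same way:
    CONF.set_override('disk_partitioning_spacing', False, 'processing')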
@@ -20,6 +20,7 @@ import sys
 
 from oslo_config import cfg
 from oslo_log import log
+from oslo_utils import units
 
 from ironic_inspector.common.i18n import _, _LC, _LI, _LW
 from ironic_inspector import conf

@@ -30,6 +31,63 @@ CONF = cfg.CONF
 
 
 LOG = log.getLogger('ironic_inspector.plugins.standard')
+KNOWN_ROOT_DEVICE_HINTS = ('model', 'vendor', 'serial', 'wwn', 'hctl',
+                           'size')
+
+
+class RootDiskSelectionHook(base.ProcessingHook):
+    """Smarter root disk selection using Ironic root device hints.
+
+    This hook must always go before SchedulerHook, otherwise the root_disk
+    field might not be updated.
+    """
+
+    def before_update(self, introspection_data, node_info, node_patches,
+                      ports_patches, **kwargs):
+        """Detect root disk from root device hints and IPA inventory."""
+        hints = node_info.node().properties.get('root_device')
+        if not hints:
+            LOG.debug('Root device hints are not provided for node %s',
+                      node_info.uuid)
+            return
+
+        inventory = introspection_data.get('inventory')
+        if not inventory:
+            LOG.error(_LW('Root device selection requires '
+                          'ironic-python-agent as an inspection ramdisk'))
+            # TODO(dtantsur): make it a real error in Mitaka cycle
+            return
+
+        disks = inventory.get('disks', [])
+        if not disks:
+            raise utils.Error(_('No disks found on node %s') %
+                              node_info.uuid)
+
+        for disk in disks:
+            properties = disk.copy()
+            # Root device hints are in GiB, data from IPA is in bytes
+            properties['size'] //= units.Gi
+
+            for name, value in hints.items():
+                actual = properties.get(name)
+                if actual != value:
+                    LOG.debug('Disk %(disk)s does not satisfy hint '
+                              '%(name)s=%(value)s for node %(node)s, '
+                              'actual value is %(actual)s',
+                              {'disk': disk.get('name'),
+                               'name': name, 'value': value,
+                               'node': node_info.uuid, 'actual': actual})
+                    break
+            else:
+                LOG.debug('Disk %(disk)s of size %(size)s satisfies '
+                          'root device hints for node %(node)s',
+                          {'disk': disk.get('name'), 'node': node_info.uuid,
+                           'size': disk['size']})
+                introspection_data['root_disk'] = disk
+                return
+
+        raise utils.Error(_('No disks satisfied root device hints for node %s')
+                          % node_info.uuid)
 
 
 class SchedulerHook(base.ProcessingHook):
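The selection logic above boils down to a first-match scan using Python's for/else. A distilled, self-contained sketch (mine, not the commit's code; the no-hints and no-inventory early returns are omitted):

    from oslo_utils import units

    def pick_root_disk(disks, hints):
        """Return the first disk whose properties satisfy every hint."""
        for disk in disks:
            props = dict(disk, size=disk['size'] // units.Gi)  # hints use GiB
            if all(props.get(name) == value for name, value in hints.items()):
                return disk
        raise LookupError('No disks satisfied root device hints')

    disks = [{'name': '/dev/sda', 'size': 1000 * units.Gi},
             {'name': '/dev/sdb', 'size': 20 * units.Gi}]
    assert pick_root_disk(disks, {'size': 20})['name'] == '/dev/sdb'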
@@ -37,8 +95,14 @@ class SchedulerHook(base.ProcessingHook):
 
     KEYS = ('cpus', 'cpu_arch', 'memory_mb', 'local_gb')
 
-    def before_processing(self, introspection_data, **kwargs):
-        """Validate that required properties are provided by the ramdisk."""
+    def before_update(self, introspection_data, node_info, **kwargs):
+        """Update node with scheduler properties."""
+        root_disk = introspection_data.get('root_disk')
+        if root_disk:
+            introspection_data['local_gb'] = root_disk['size'] // units.Gi
+            if CONF.processing.disk_partitioning_spacing:
+                introspection_data['local_gb'] -= 1
+
         missing = [key for key in self.KEYS if not introspection_data.get(key)]
         if missing:
             raise utils.Error(

@@ -49,8 +113,6 @@ class SchedulerHook(base.ProcessingHook):
                  'memory %(memory_mb)s MiB, disk %(local_gb)s GiB'),
                 {key: introspection_data.get(key) for key in self.KEYS})
 
-    def before_update(self, introspection_data, node_info, **kwargs):
-        """Update node with scheduler properties."""
         overwrite = CONF.processing.overwrite_existing
         properties = {key: str(introspection_data[key])
                       for key in self.KEYS if overwrite or
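With root_disk in hand, the scheduler hook derives local_gb from the selected disk instead of trusting a ramdisk-reported number. Recapped as a standalone function (my sketch; the real code writes into introspection_data):

    from oslo_utils import units

    def local_gb(root_disk_size_bytes, spacing=True):
        gb = root_disk_size_bytes // units.Gi  # IPA reports bytes
        if spacing:
            gb -= 1  # CONF.processing.disk_partitioning_spacing
        return gb

    assert local_gb(20 * units.Gi) == 19
    assert local_gb(42 * units.Gi, spacing=False) == 42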
@@ -22,6 +22,7 @@ import tempfile
 import unittest
 
 import mock
+from oslo_utils import units
 import requests
 
 from ironic_inspector import main

@@ -76,13 +77,49 @@ class Base(base.NodeTest):
             },
             'boot_interface': '01-' + self.macs[0].replace(':', '-'),
             'ipmi_address': self.bmc_address,
+            'inventory': {
+                'disks': [
+                    {'name': '/dev/sda', 'model': 'Big Data Disk',
+                     'size': 1000 * units.Gi},
+                    {'name': '/dev/sdb', 'model': 'Small OS Disk',
+                     'size': 20 * units.Gi},
+                ]
+            },
+            'root_disk': {'name': '/dev/sda', 'model': 'Big Data Disk',
+                          'size': 1000 * units.Gi},
         }
+        self.data_old_ramdisk = {
+            'cpus': 4,
+            'cpu_arch': 'x86_64',
+            'memory_mb': 12288,
+            'local_gb': 464,
+            'interfaces': {
+                'eth1': {'mac': self.macs[0], 'ip': '1.2.1.2'},
+                'eth2': {'mac': '12:12:21:12:21:12'},
+                'eth3': {'mac': self.macs[1], 'ip': '1.2.1.1'},
+            },
+            'boot_interface': '01-' + self.macs[0].replace(':', '-'),
+            'ipmi_address': self.bmc_address,
+        }
 
         self.patch = [
             {'op': 'add', 'path': '/properties/cpus', 'value': '4'},
             {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
             {'op': 'add', 'path': '/properties/memory_mb', 'value': '12288'},
             {'path': '/properties/local_gb', 'value': '999', 'op': 'add'}
         ]
+        self.patch_old_ramdisk = [
+            {'op': 'add', 'path': '/properties/cpus', 'value': '4'},
+            {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
+            {'op': 'add', 'path': '/properties/memory_mb', 'value': '12288'},
+            {'path': '/properties/local_gb', 'value': '464', 'op': 'add'}
+        ]
+        self.patch_root_hints = [
+            {'op': 'add', 'path': '/properties/cpus', 'value': '4'},
+            {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
+            {'op': 'add', 'path': '/properties/memory_mb', 'value': '12288'},
+            {'path': '/properties/local_gb', 'value': '19', 'op': 'add'}
+        ]
 
         self.node.power_state = 'power off'
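The expected patch values trace directly to the fixtures above (my gloss, not part of the diff): /dev/sda's 1000 GiB root_disk minus the 1 GiB partitioning spacing yields '999'; the 20 GiB /dev/sdb selected by the hints test yields '19'; and the old-style ramdisk's self-reported 464 passes through untouched:

    from oslo_utils import units
    assert (1000 * units.Gi) // units.Gi - 1 == 999  # default data, spacing on
    assert (20 * units.Gi) // units.Gi - 1 == 19     # root device hints test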
@@ -155,6 +192,27 @@ class Test(Base):
         status = self.call_get_status(self.uuid)
         self.assertEqual({'finished': True, 'error': None}, status)
 
+    def test_old_ramdisk(self):
+        self.call_introspect(self.uuid)
+        eventlet.greenthread.sleep(DEFAULT_SLEEP)
+        self.cli.node.set_power_state.assert_called_once_with(self.uuid,
+                                                              'reboot')
+
+        status = self.call_get_status(self.uuid)
+        self.assertEqual({'finished': False, 'error': None}, status)
+
+        res = self.call_continue(self.data_old_ramdisk)
+        self.assertEqual({'uuid': self.uuid}, res)
+        eventlet.greenthread.sleep(DEFAULT_SLEEP)
+
+        self.assertCalledWithPatch(self.patch_old_ramdisk,
+                                   self.cli.node.update)
+        self.cli.port.create.assert_called_once_with(
+            node_uuid=self.uuid, address='11:22:33:44:55:66')
+
+        status = self.call_get_status(self.uuid)
+        self.assertEqual({'finished': True, 'error': None}, status)
+
     def test_setup_ipmi(self):
         patch_credentials = [
             {'op': 'add', 'path': '/driver_info/ipmi_username',

@@ -235,8 +293,8 @@ class Test(Base):
         {
             'conditions': [
                 {'field': 'memory_mb', 'op': 'eq', 'value': 12288},
-                {'field': 'local_gb', 'op': 'gt', 'value': 400},
-                {'field': 'local_gb', 'op': 'lt', 'value': 500},
+                {'field': 'local_gb', 'op': 'gt', 'value': 998},
+                {'field': 'local_gb', 'op': 'lt', 'value': 1000},
             ],
             'actions': [
                 {'action': 'set-attribute', 'path': '/extra/foo',

@@ -271,6 +329,28 @@ class Test(Base):
             self.uuid,
             [{'op': 'add', 'path': '/extra/foo', 'value': 'bar'}])
 
+    def test_root_device_hints(self):
+        self.node.properties['root_device'] = {'size': 20}
+
+        self.call_introspect(self.uuid)
+        eventlet.greenthread.sleep(DEFAULT_SLEEP)
+        self.cli.node.set_power_state.assert_called_once_with(self.uuid,
+                                                              'reboot')
+
+        status = self.call_get_status(self.uuid)
+        self.assertEqual({'finished': False, 'error': None}, status)
+
+        res = self.call_continue(self.data)
+        self.assertEqual({'uuid': self.uuid}, res)
+        eventlet.greenthread.sleep(DEFAULT_SLEEP)
+
+        self.assertCalledWithPatch(self.patch_root_hints, self.cli.node.update)
+        self.cli.port.create.assert_called_once_with(
+            node_uuid=self.uuid, address='11:22:33:44:55:66')
+
+        status = self.call_get_status(self.uuid)
+        self.assertEqual({'finished': True, 'error': None}, status)
+
 
 @contextlib.contextmanager
 def mocked_server():
@@ -18,6 +18,7 @@ import tempfile
 
 import mock
 from oslo_config import cfg
+from oslo_utils import units
 
 from ironic_inspector import node_cache
 from ironic_inspector.plugins import base

@@ -49,18 +50,16 @@ class TestSchedulerHook(test_base.NodeTest):
         ext = base.processing_hooks_manager()['scheduler']
         self.assertIsInstance(ext.obj, std_plugins.SchedulerHook)
 
-    def test_before_processing(self):
-        self.hook.before_processing(self.data)
-
-    def test_before_processing_missing(self):
+    def test_missing(self):
         for key in self.data:
             new_data = self.data.copy()
             del new_data[key]
             self.assertRaisesRegexp(utils.Error, key,
-                                    self.hook.before_processing, new_data)
+                                    self.hook.before_update, new_data,
+                                    self.node_info)
 
     @mock.patch.object(node_cache.NodeInfo, 'patch')
-    def test_before_update(self, mock_patch):
+    def test_ok(self, mock_patch):
         patch = [
             {'path': '/properties/cpus', 'value': '2', 'op': 'add'},
             {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},

@@ -72,7 +71,7 @@ class TestSchedulerHook(test_base.NodeTest):
         self.assertCalledWithPatch(patch, mock_patch)
 
     @mock.patch.object(node_cache.NodeInfo, 'patch')
-    def test_before_update_no_overwrite(self, mock_patch):
+    def test_no_overwrite(self, mock_patch):
         CONF.set_override('overwrite_existing', False, 'processing')
         self.node.properties = {
             'memory_mb': '4096',

@@ -86,6 +85,33 @@ class TestSchedulerHook(test_base.NodeTest):
         self.hook.before_update(self.data, self.node_info)
         self.assertCalledWithPatch(patch, mock_patch)
 
+    @mock.patch.object(node_cache.NodeInfo, 'patch')
+    def test_root_disk(self, mock_patch):
+        self.data['root_disk'] = {'name': '/dev/sda', 'size': 42 * units.Gi}
+        patch = [
+            {'path': '/properties/cpus', 'value': '2', 'op': 'add'},
+            {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
+            {'path': '/properties/memory_mb', 'value': '1024', 'op': 'add'},
+            {'path': '/properties/local_gb', 'value': '41', 'op': 'add'}
+        ]
+
+        self.hook.before_update(self.data, self.node_info)
+        self.assertCalledWithPatch(patch, mock_patch)
+
+    @mock.patch.object(node_cache.NodeInfo, 'patch')
+    def test_root_disk_no_spacing(self, mock_patch):
+        CONF.set_override('disk_partitioning_spacing', False, 'processing')
+        self.data['root_disk'] = {'name': '/dev/sda', 'size': 42 * units.Gi}
+        patch = [
+            {'path': '/properties/cpus', 'value': '2', 'op': 'add'},
+            {'path': '/properties/cpu_arch', 'value': 'x86_64', 'op': 'add'},
+            {'path': '/properties/memory_mb', 'value': '1024', 'op': 'add'},
+            {'path': '/properties/local_gb', 'value': '42', 'op': 'add'}
+        ]
+
+        self.hook.before_update(self.data, self.node_info)
+        self.assertCalledWithPatch(patch, mock_patch)
+
 
 class TestValidateInterfacesHook(test_base.NodeTest):
     def setUp(self):

@@ -202,6 +228,85 @@ class TestValidateInterfacesHook(test_base.NodeTest):
         mock_delete_port.assert_any_call(self.existing_ports[1])
 
 
+class TestRootDiskSelection(test_base.NodeTest):
+    def setUp(self):
+        super(TestRootDiskSelection, self).setUp()
+        self.hook = std_plugins.RootDiskSelectionHook()
+        self.data = {
+            'inventory': {
+                'disks': [
+                    {'model': 'Model 1', 'size': 20 * units.Gi,
+                     'name': '/dev/sdb'},
+                    {'model': 'Model 2', 'size': 5 * units.Gi,
+                     'name': '/dev/sda'},
+                    {'model': 'Model 3', 'size': 10 * units.Gi,
+                     'name': '/dev/sdc'},
+                    {'model': 'Model 4', 'size': 4 * units.Gi,
+                     'name': '/dev/sdd'},
+                    {'model': 'Too Small', 'size': 1 * units.Gi,
+                     'name': '/dev/sde'},
+                ]
+            }
+        }
+        self.matched = self.data['inventory']['disks'][2].copy()
+        self.node_info = mock.Mock(spec=node_cache.NodeInfo,
+                                   uuid=self.uuid,
+                                   **{'node.return_value': self.node})
+
+    def test_no_hints(self):
+        self.hook.before_update(self.data, self.node_info, None, None)
+
+        self.assertNotIn('local_gb', self.data)
+        self.assertNotIn('root_disk', self.data)
+
+    @mock.patch.object(std_plugins.LOG, 'error')
+    def test_no_inventory(self, mock_log):
+        self.node.properties['root_device'] = {'model': 'foo'}
+        del self.data['inventory']
+
+        self.hook.before_update(self.data, self.node_info, None, None)
+
+        self.assertNotIn('local_gb', self.data)
+        self.assertNotIn('root_disk', self.data)
+        self.assertTrue(mock_log.called)
+
+    def test_no_disks(self):
+        self.node.properties['root_device'] = {'size': 10}
+        self.data['inventory']['disks'] = []
+
+        self.assertRaisesRegexp(utils.Error,
+                                'No disks found',
+                                self.hook.before_update,
+                                self.data, self.node_info, None, None)
+
+    def test_one_matches(self):
+        self.node.properties['root_device'] = {'size': 10}
+
+        self.hook.before_update(self.data, self.node_info, None, None)
+
+        self.assertEqual(self.matched, self.data['root_disk'])
+
+    def test_all_match(self):
+        self.node.properties['root_device'] = {'size': 10,
+                                               'model': 'Model 3'}
+
+        self.hook.before_update(self.data, self.node_info, None, None)
+
+        self.assertEqual(self.matched, self.data['root_disk'])
+
+    def test_one_fails(self):
+        self.node.properties['root_device'] = {'size': 10,
+                                               'model': 'Model 42'}
+
+        self.assertRaisesRegexp(utils.Error,
+                                'No disks satisfied root device hints',
+                                self.hook.before_update,
+                                self.data, self.node_info, None, None)
+
+        self.assertNotIn('local_gb', self.data)
+        self.assertNotIn('root_disk', self.data)
+
+
 class TestRamdiskError(test_base.BaseTest):
     def setUp(self):
         super(TestRamdiskError, self).setUp()
@@ -234,8 +234,7 @@ class TestProcessNode(BaseTest):
     def setUp(self):
         super(TestProcessNode, self).setUp()
         CONF.set_override('processing_hooks',
-                          'ramdisk_error,scheduler,validate_interfaces,'
-                          'example',
+                          '$processing.default_processing_hooks,example',
                           'processing')
         self.validate_attempts = 5
         self.data['macs'] = self.macs  # validate_interfaces hook
@@ -25,10 +25,11 @@ ironic_inspector.hooks.processing =
     scheduler = ironic_inspector.plugins.standard:SchedulerHook
     validate_interfaces = ironic_inspector.plugins.standard:ValidateInterfacesHook
     ramdisk_error = ironic_inspector.plugins.standard:RamdiskErrorHook
+    root_disk_selection = ironic_inspector.plugins.standard:RootDiskSelectionHook
     example = ironic_inspector.plugins.example:ExampleProcessingHook
     extra_hardware = ironic_inspector.plugins.extra_hardware:ExtraHardwareHook
     raid_device = ironic_inspector.plugins.raid_device:RaidDeviceDetection
-    # Deprecated name
+    # Deprecated name for raid_device, don't confuse with root_disk_selection
     root_device_hint = ironic_inspector.plugins.raid_device:RaidDeviceDetection
 ironic_inspector.hooks.node_not_found =
     example = ironic_inspector.plugins.example:example_not_found_hook
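The setup.cfg entry makes the hook discoverable by name; ironic-inspector resolves it through its plugins.base manager (stevedore under the hood). Roughly, and only as an illustration using the standard setuptools entry-point API:

    import pkg_resources

    eps = pkg_resources.iter_entry_points('ironic_inspector.hooks.processing')
    hooks = {ep.name: ep.load() for ep in eps}
    # once the package is installed, hooks['root_disk_selection'] is
    # RootDiskSelectionHook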