Convert the object model to use oslo.versionedobjects (OVO)
- Initially this is a naive implementation that does not fully utilize the abilities of OVO
- Updated test YAML files to match the model
- Did not convert Task objects, as they will be refactored for Celery in the near future
parent e8dbac095b
commit 2f1968fa25
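For context, here is a minimal sketch of what an oslo.versionedobjects model class looks like. This is illustrative only and not taken from this commit; it assumes the stock oslo_versionedobjects base classes rather than any drydock-specific wrapper, and the field names simply mirror the HostPartition data handled by the YAML ingester further down.

from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import fields as ovo_fields


@ovo_base.VersionedObjectRegistry.register
class ExampleHostPartition(ovo_base.VersionedObject):
    # Illustrative field set; names are examples, not the committed schema
    VERSION = '1.0'

    fields = {
        'name': ovo_fields.StringField(),
        'device': ovo_fields.StringField(nullable=True),
        'size': ovo_fields.StringField(nullable=True),
        'fstype': ovo_fields.StringField(nullable=True, default='ext4'),
        'mountpoint': ovo_fields.StringField(nullable=True),
    }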
@ -15,9 +15,9 @@ from threading import Thread, Lock
import uuid
import time

import helm_drydock.objects.fields as hd_fields
import helm_drydock.statemgmt as statemgmt
import helm_drydock.enum as enum
import helm_drydock.model.task as tasks
import helm_drydock.objects.task as tasks
import helm_drydock.error as errors

# This is the interface for the orchestrator to access a driver
@ -37,7 +37,7 @@ class ProviderDriver(object):
self.state_manager = state_manager

# These are the actions that this driver supports
self.supported_actions = [enum.OrchestratorAction.Noop]
self.supported_actions = [hd_fields.OrchestratorAction.Noop]

self.driver_name = "generic"
self.driver_key = "generic"
@ -81,9 +81,9 @@ class DriverTaskRunner(Thread):
self.execute_task()

def execute_task(self):
if self.task.action == enum.OrchestratorAction.Noop:
if self.task.action == hd_fields.OrchestratorAction.Noop:
self.orchestrator.task_field_update(self.task.get_id(),
status=enum.TaskStatus.Running)
status=hd_fields.TaskStatus.Running)

i = 0
while i < 5:
@ -91,12 +91,12 @@ class DriverTaskRunner(Thread):
i = i + 1
if self.task.terminate:
self.orchestrator.task_field_update(self.task.get_id(),
status=enum.TaskStatus.Terminated)
status=hd_fields.TaskStatus.Terminated)
return
else:
time.sleep(1)

self.orchestrator.task_field_update(self.task.get_id(),
status=enum.TaskStatus.Complete)
status=hd_fields.TaskStatus.Complete)
return
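The enum.* constants above are replaced by hd_fields.* throughout this commit. The contents of helm_drydock/objects/fields.py are not shown in this diff, so the following is only a hedged sketch of how such field classes are commonly declared on top of oslo.versionedobjects; the string values and the BaseEnumField wrapper are assumptions.

from oslo_versionedobjects import fields


class OrchestratorAction(fields.Enum):
    # Member names taken from the hunks in this diff; values are assumed
    Noop = 'noop'
    SetNodeBoot = 'set_node_boot'

    ALL = (Noop, SetNodeBoot)

    def __init__(self):
        super(OrchestratorAction, self).__init__(valid_values=OrchestratorAction.ALL)


class OrchestratorActionField(fields.BaseEnumField):
    # Field type usable inside an OVO 'fields' dict
    AUTO_TYPE = OrchestratorAction()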
@ -19,7 +19,7 @@
# initiate_reboot
# set_power_off
# set_power_on
import helm_drydock.enum as enum
import helm_drydock.objects.fields as hd_fields
import helm_drydock.error as errors

from helm_drydock.drivers import ProviderDriver
@ -29,12 +29,12 @@ class OobDriver(ProviderDriver):
def __init__(self, **kwargs):
super(OobDriver, self).__init__(**kwargs)

self.supported_actions = [enum.OobAction.ConfigNodePxe,
enum.OobAction.SetNodeBoot,
enum.OobAction.PowerOffNode,
enum.OobAction.PowerOnNode,
enum.OobAction.PowerCycleNode,
enum.OobAction.InterrogateNode]
self.supported_actions = [hd_fields.OrchestratorAction.ConfigNodePxe,
hd_fields.OrchestratorAction.SetNodeBoot,
hd_fields.OrchestratorAction.PowerOffNode,
hd_fields.OrchestratorAction.PowerOnNode,
hd_fields.OrchestratorAction.PowerCycleNode,
hd_fields.OrchestratorAction.InterrogateNode]

self.driver_name = "oob_generic"
self.driver_key = "oob_generic"
@ -16,8 +16,9 @@ import time
|
||||
from pyghmi.ipmi.command import Command
|
||||
|
||||
import helm_drydock.error as errors
|
||||
import helm_drydock.enum as enum
|
||||
import helm_drydock.model.task as task_model
|
||||
|
||||
import helm_drydock.objects.fields as hd_fields
|
||||
import helm_drydock.objects.task as task_model
|
||||
|
||||
import helm_drydock.drivers.oob as oob
|
||||
import helm_drydock.drivers as drivers
|
||||
@ -54,10 +55,9 @@ class PyghmiDriver(oob.OobDriver):
|
||||
(task_id))
|
||||
|
||||
self.orchestrator.task_field_update(task.get_id(),
|
||||
status=enum.TaskStatus.Running)
|
||||
status=hd_fields.TaskStatus.Running)
|
||||
|
||||
site_design = self.orchestrator.get_effective_site(task.site_name,
|
||||
change_id=design_id)
|
||||
site_design = self.orchestrator.get_effective_site(design_id, task.site_name)
|
||||
|
||||
target_nodes = []
|
||||
|
||||
@ -89,9 +89,9 @@ class PyghmiDriver(oob.OobDriver):
|
||||
while len(incomplete_subtasks) > 0:
|
||||
for n in incomplete_subtasks:
|
||||
t = self.state_manager.get_task(n)
|
||||
if t.get_status() in [enum.TaskStatus.Terminated,
|
||||
enum.TaskStatus.Complete,
|
||||
enum.TaskStatus.Errored]:
|
||||
if t.get_status() in [hd_fields.TaskStatus.Terminated,
|
||||
hd_fields.TaskStatus.Complete,
|
||||
hd_fields.TaskStatus.Errored]:
|
||||
incomplete_subtasks.remove(n)
|
||||
time.sleep(2)
|
||||
i = i+1
|
||||
@ -103,11 +103,11 @@ class PyghmiDriver(oob.OobDriver):
|
||||
|
||||
success_subtasks = [x
|
||||
for x in subtasks
|
||||
if x.get_result() == enum.ActionResult.Success]
|
||||
if x.get_result() == hd_fields.ActionResult.Success]
|
||||
nosuccess_subtasks = [x
|
||||
for x in subtasks
|
||||
if x.get_result() in [enum.ActionResult.PartialSuccess,
|
||||
enum.ActionResult.Failure]]
|
||||
if x.get_result() in [hd_fields.ActionResult.PartialSuccess,
|
||||
hd_fields.ActionResult.Failure]]
|
||||
|
||||
print("Task %s successful subtasks: %s" %
|
||||
(task.get_id(), len(success_subtasks)))
|
||||
@ -118,17 +118,17 @@ class PyghmiDriver(oob.OobDriver):
|
||||
|
||||
task_result = None
|
||||
if len(success_subtasks) > 0 and len(nosuccess_subtasks) > 0:
|
||||
task_result = enum.ActionResult.PartialSuccess
|
||||
task_result = hd_fields.ActionResult.PartialSuccess
|
||||
elif len(success_subtasks) == 0 and len(nosuccess_subtasks) > 0:
|
||||
task_result = enum.ActionResult.Failure
|
||||
task_result = hd_fields.ActionResult.Failure
|
||||
elif len(success_subtasks) > 0 and len(nosuccess_subtasks) == 0:
|
||||
task_result = enum.ActionResult.Success
|
||||
task_result = hd_fields.ActionResult.Success
|
||||
else:
|
||||
task_result = enum.ActionResult.Incomplete
|
||||
task_result = hd_fields.ActionResult.Incomplete
|
||||
|
||||
self.orchestrator.task_field_update(task.get_id(),
|
||||
result=task_result,
|
||||
status=enum.TaskStatus.Complete)
|
||||
status=hd_fields.TaskStatus.Complete)
|
||||
return
|
||||
|
||||
class PyghmiTaskRunner(drivers.DriverTaskRunner):
|
||||
@ -148,8 +148,8 @@ class PyghmiTaskRunner(drivers.DriverTaskRunner):
|
||||
|
||||
if len(self.task.node_list) != 1:
|
||||
self.orchestrator.task_field_update(self.task.get_id(),
|
||||
result=enum.ActionResult.Incomplete,
|
||||
status=enum.TaskStatus.Errored)
|
||||
result=hd_fields.ActionResult.Incomplete,
|
||||
status=hd_fields.TaskStatus.Errored)
|
||||
raise errors.DriverError("Multiple names (%s) in task %s node_list"
|
||||
% (len(self.task.node_list), self.task.get_id()))
|
||||
|
||||
@ -157,8 +157,8 @@ class PyghmiTaskRunner(drivers.DriverTaskRunner):
|
||||
|
||||
if self.node.get_name() != target_node_name:
|
||||
self.orchestrator.task_field_update(self.task.get_id(),
|
||||
result=enum.ActionResult.Incomplete,
|
||||
status=enum.TaskStatus.Errored)
|
||||
result=hd_fields.ActionResult.Incomplete,
|
||||
status=hd_fields.TaskStatus.Errored)
|
||||
raise errors.DriverError("Runner node does not match " \
|
||||
"task node scope")
|
||||
|
||||
@ -168,25 +168,25 @@ class PyghmiTaskRunner(drivers.DriverTaskRunner):
|
||||
|
||||
if ipmi_address is None:
|
||||
self.orchestrator.task_field_update(self.task.get_id(),
|
||||
result=enum.ActionResult.Incomplete,
|
||||
status=enum.TaskStatus.Errored)
|
||||
result=hd_fields.ActionResult.Incomplete,
|
||||
status=hd_fields.TaskStatus.Errored)
|
||||
raise errors.DriverError("Node %s has no IPMI address" %
|
||||
(target_node_name))
|
||||
|
||||
self.orchestrator.task_field_update(self.task.get_id(),
|
||||
status=enum.TaskStatus.Running)
|
||||
status=hd_fields.TaskStatus.Running)
|
||||
ipmi_account = self.node.applied.get('oob_account', '')
|
||||
ipmi_credential = self.node.applied.get('oob_credential', '')
|
||||
|
||||
ipmi_session = Command(bmc=ipmi_address, userid=ipmi_account,
|
||||
password=ipmi_credential)
|
||||
|
||||
if task_action == enum.OobAction.ConfigNodePxe:
|
||||
if task_action == hd_fields.OrchestratorAction.ConfigNodePxe:
|
||||
self.orchestrator.task_field_update(self.task.get_id(),
|
||||
result=enum.ActionResult.Failure,
|
||||
status=enum.TaskStatus.Complete)
|
||||
result=hd_fields.ActionResult.Failure,
|
||||
status=hd_fields.TaskStatus.Complete)
|
||||
return
|
||||
elif task_action == enum.OobAction.SetNodeBoot:
|
||||
elif task_action == hd_fields.OrchestratorAction.SetNodeBoot:
|
||||
ipmi_session.set_bootdev('pxe')
|
||||
|
||||
time.sleep(3)
|
||||
@ -195,14 +195,14 @@ class PyghmiTaskRunner(drivers.DriverTaskRunner):
|
||||
|
||||
if bootdev.get('bootdev', '') == 'network':
|
||||
self.orchestrator.task_field_update(self.task.get_id(),
|
||||
result=enum.ActionResult.Success,
|
||||
status=enum.TaskStatus.Complete)
|
||||
result=hd_fields.ActionResult.Success,
|
||||
status=hd_fields.TaskStatus.Complete)
|
||||
else:
|
||||
self.orchestrator.task_field_update(self.task.get_id(),
|
||||
result=enum.ActionResult.Failure,
|
||||
status=enum.TaskStatus.Complete)
|
||||
result=hd_fields.ActionResult.Failure,
|
||||
status=hd_fields.TaskStatus.Complete)
|
||||
return
|
||||
elif task_action == enum.OobAction.PowerOffNode:
|
||||
elif task_action == hd_fields.OrchestratorAction.PowerOffNode:
|
||||
ipmi_session.set_power('off')
|
||||
|
||||
i = 18
|
||||
@ -216,14 +216,14 @@ class PyghmiTaskRunner(drivers.DriverTaskRunner):
|
||||
|
||||
if power_state.get('powerstate', '') == 'off':
|
||||
self.orchestrator.task_field_update(self.task.get_id(),
|
||||
result=enum.ActionResult.Success,
|
||||
status=enum.TaskStatus.Complete)
|
||||
result=hd_fields.ActionResult.Success,
|
||||
status=hd_fields.TaskStatus.Complete)
|
||||
else:
|
||||
self.orchestrator.task_field_update(self.task.get_id(),
|
||||
result=enum.ActionResult.Failure,
|
||||
status=enum.TaskStatus.Complete)
|
||||
result=hd_fields.ActionResult.Failure,
|
||||
status=hd_fields.TaskStatus.Complete)
|
||||
return
|
||||
elif task_action == enum.OobAction.PowerOnNode:
|
||||
elif task_action == hd_fields.OrchestratorAction.PowerOnNode:
|
||||
ipmi_session.set_power('on')
|
||||
|
||||
i = 18
|
||||
@ -237,14 +237,14 @@ class PyghmiTaskRunner(drivers.DriverTaskRunner):
|
||||
|
||||
if power_state.get('powerstate', '') == 'on':
|
||||
self.orchestrator.task_field_update(self.task.get_id(),
|
||||
result=enum.ActionResult.Success,
|
||||
status=enum.TaskStatus.Complete)
|
||||
result=hd_fields.ActionResult.Success,
|
||||
status=hd_fields.TaskStatus.Complete)
|
||||
else:
|
||||
self.orchestrator.task_field_update(self.task.get_id(),
|
||||
result=enum.ActionResult.Failure,
|
||||
status=enum.TaskStatus.Complete)
|
||||
result=hd_fields.ActionResult.Failure,
|
||||
status=hd_fields.TaskStatus.Complete)
|
||||
return
|
||||
elif task_action == enum.OobAction.PowerCycleNode:
|
||||
elif task_action == hd_fields.OrchestratorAction.PowerCycleNode:
|
||||
ipmi_session.set_power('off')
|
||||
|
||||
# Wait for power state of off before booting back up
|
||||
@ -260,8 +260,8 @@ class PyghmiTaskRunner(drivers.DriverTaskRunner):
|
||||
|
||||
if power_state.get('powerstate', '') == 'on':
|
||||
self.orchestrator.task_field_update(self.task.get_id(),
|
||||
result=enum.ActionResult.Failure,
|
||||
status=enum.TaskStatus.Complete)
|
||||
result=hd_fields.ActionResult.Failure,
|
||||
status=hd_fields.TaskStatus.Complete)
|
||||
return
|
||||
|
||||
ipmi_session.set_power('on')
|
||||
@ -277,18 +277,18 @@ class PyghmiTaskRunner(drivers.DriverTaskRunner):
|
||||
|
||||
if power_state.get('powerstate', '') == 'on':
|
||||
self.orchestrator.task_field_update(self.task.get_id(),
|
||||
result=enum.ActionResult.Success,
|
||||
status=enum.TaskStatus.Complete)
|
||||
result=hd_fields.ActionResult.Success,
|
||||
status=hd_fields.TaskStatus.Complete)
|
||||
else:
|
||||
self.orchestrator.task_field_update(self.task.get_id(),
|
||||
result=enum.ActionResult.Failure,
|
||||
status=enum.TaskStatus.Complete)
|
||||
result=hd_fields.ActionResult.Failure,
|
||||
status=hd_fields.TaskStatus.Complete)
|
||||
return
|
||||
elif task_action == enum.OobAction.InterrogateNode:
|
||||
elif task_action == hd_fields.OrchestratorAction.InterrogateNode:
|
||||
mci_id = ipmi_session.get_mci()
|
||||
|
||||
self.orchestrator.task_field_update(self.task.get_id(),
|
||||
result=enum.ActionResult.Success,
|
||||
status=enum.TaskStatus.Complete,
|
||||
result=hd_fields.ActionResult.Success,
|
||||
status=hd_fields.TaskStatus.Complete,
|
||||
result_detail=mci_id)
|
||||
return
|
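For reference, the pyghmi calls exercised by this task runner can be tried standalone roughly as follows. This is a sketch only; the BMC address and credentials are placeholders, and the commented return values show the general shape of pyghmi's responses rather than guaranteed output.

from pyghmi.ipmi.command import Command

# Placeholder BMC endpoint and credentials
ipmi_session = Command(bmc='10.23.10.11', userid='admin', password='admin')

ipmi_session.set_bootdev('pxe')        # request PXE for the next boot
print(ipmi_session.get_bootdev())      # e.g. {'bootdev': 'network', ...}

ipmi_session.set_power('off')          # request chassis power off
print(ipmi_session.get_power())        # e.g. {'powerstate': 'off'}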
@ -17,14 +17,16 @@
import logging
import yaml
import uuid

import helm_drydock.model.site as site
import helm_drydock.model.network as network
import helm_drydock.model.hwprofile as hwprofile
import helm_drydock.model.node as node
import helm_drydock.model.hostprofile as hostprofile
import helm_drydock.objects as objects
import helm_drydock.objects.site as site
import helm_drydock.objects.network as network
import helm_drydock.objects.hwprofile as hwprofile
import helm_drydock.objects.node as node
import helm_drydock.objects.hostprofile as hostprofile

from helm_drydock.statemgmt import DesignState, SiteDesign, DesignError
from helm_drydock.statemgmt import DesignState

class Ingester(object):

@ -64,22 +66,22 @@ class Ingester(object):
self.log.error("ingest_data called without valid DesignState handler")
raise Exception("Invalid design_state handler")

# TODO this method needs refactored to handle design base vs change

design_data = None

try:
design_data = design_state.get_design_base()
except DesignError:
design_data = SiteDesign()
design_state.post_design_base(design_data)
# If no design_id is specified, instantiate a new one
if 'design_id' not in kwargs.keys():
design_id = str(uuid.uuid4())
design_data = objects.SiteDesign(id=design_id)
design_state.post_design(design_data)
else:
design_id = kwargs.get('design_id')
design_data = design_state.get_design(design_id)

if plugin_name in self.registered_plugins:
design_items = self.registered_plugins[plugin_name].ingest_data(**kwargs)
# Need to persist data here, but we don't yet have the statemgmt service working
for m in design_items:
if type(m) is site.Site:
design_data.add_site(m)
design_data.set_site(m)
elif type(m) is network.Network:
design_data.add_network(m)
elif type(m) is network.NetworkLink:
@ -90,7 +92,7 @@ class Ingester(object):
design_data.add_hardware_profile(m)
elif type(m) is node.BaremetalNode:
design_data.add_baremetal_node(m)
design_state.put_design_base(design_data)
design_state.put_design(design_data)
else:
self.log.error("Could not find plugin %s to ingest data." % (plugin_name))
raise LookupError("Could not find plugin %s" % plugin_name)
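A hedged usage sketch of the updated ingestion flow follows. Only ingest_data and the design_id keyword are taken from the hunk above; the plugin-registration helper, the 'yaml' plugin name, the filenames keyword, and the state_mgr handle are assumptions for illustration.

ingester = Ingester()
# Plugin-registration helper and plugin path are assumed, not from this diff
ingester.enable_plugins(['helm_drydock.ingester.plugins.yaml.YamlIngester'])

# Without design_id a new SiteDesign is created and posted to the DesignState;
# pass design_id=<existing id> to load and update an existing design instead.
ingester.ingest_data('yaml', design_state=state_mgr,
                     filenames=['site_design.yaml'])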
@ -19,25 +19,13 @@
|
||||
import yaml
|
||||
import logging
|
||||
|
||||
import helm_drydock.model.hwprofile as hwprofile
|
||||
import helm_drydock.model.node as node
|
||||
import helm_drydock.model.site as site
|
||||
import helm_drydock.model.hostprofile as hostprofile
|
||||
import helm_drydock.model.network as network
|
||||
import helm_drydock.objects.fields as hd_fields
|
||||
|
||||
from helm_drydock import objects
|
||||
from helm_drydock.ingester.plugins import IngesterPlugin
|
||||
|
||||
class YamlIngester(IngesterPlugin):
|
||||
|
||||
kind_map = {
|
||||
"Region": site.Site,
|
||||
"NetworkLink": network.NetworkLink,
|
||||
"HardwareProfile": hwprofile.HardwareProfile,
|
||||
"Network": network.Network,
|
||||
"HostProfile": hostprofile.HostProfile,
|
||||
"BaremetalNode": node.BaremetalNode,
|
||||
}
|
||||
|
||||
def __init__(self):
|
||||
super(YamlIngester, self).__init__()
|
||||
|
||||
@ -91,19 +79,274 @@ class YamlIngester(IngesterPlugin):
|
||||
for d in parsed_data:
|
||||
kind = d.get('kind', '')
|
||||
if kind != '':
|
||||
if kind in YamlIngester.kind_map:
|
||||
try:
|
||||
model = YamlIngester.kind_map[kind](**d)
|
||||
if kind == 'Region':
|
||||
api_version = d.get('apiVersion', '')
|
||||
|
||||
if api_version == 'v1.0':
|
||||
model = objects.Site()
|
||||
|
||||
metadata = d.get('metadata', {})
|
||||
|
||||
# Need to add validation logic, we'll assume the input is
|
||||
# valid for now
|
||||
model.name = metadata.get('name', '')
|
||||
model.status = hd_fields.SiteStatus.Unknown
|
||||
model.source = hd_fields.ModelSource.Designed
|
||||
|
||||
spec = d.get('spec', {})
|
||||
|
||||
model.tag_definitions = objects.NodeTagDefinitionList()
|
||||
|
||||
tag_defs = spec.get('tag_definitions', [])
|
||||
|
||||
for t in tag_defs:
|
||||
tag_model = objects.NodeTagDefinition()
|
||||
tag_model.tag = t.get('tag', '')
|
||||
tag_model.type = t.get('definition_type', '')
|
||||
tag_model.definition = t.get('definition', '')
|
||||
|
||||
if tag_model.type not in ['lshw_xpath']:
|
||||
raise ValueError('Unknown definition type in ' \
'NodeTagDefinition: %s' % (tag_model.type))
|
||||
model.tag_definitions.append(tag_model)
|
||||
|
||||
models.append(model)
|
||||
except Exception as err:
|
||||
self.log.error("Error building model %s: %s"
|
||||
% (kind, str(err)))
|
||||
continue
|
||||
else:
|
||||
self.log.error(
|
||||
"Error processing document, unknown kind %s"
|
||||
% (kind))
|
||||
continue
|
||||
else:
|
||||
raise ValueError('Unknown API version %s of Region kind' % (api_version))
|
||||
elif kind == 'NetworkLink':
|
||||
api_version = d.get('apiVersion', '')
|
||||
|
||||
if api_version == "v1.0":
|
||||
model = objects.NetworkLink()
|
||||
|
||||
metadata = d.get('metadata', {})
|
||||
spec = d.get('spec', {})
|
||||
|
||||
model.name = metadata.get('name', '')
|
||||
model.site = metadata.get('region', '')
|
||||
|
||||
bonding = spec.get('bonding', {})
|
||||
model.bonding_mode = bonding.get('mode',
|
||||
hd_fields.NetworkLinkBondingMode.Disabled)
|
||||
|
||||
# How should we define defaults for CIs not in the input?
|
||||
if model.bonding_mode == hd_fields.NetworkLinkBondingMode.LACP:
|
||||
model.bonding_xmit_hash = bonding.get('hash', 'layer3+4')
|
||||
model.bonding_peer_rate = bonding.get('peer_rate', 'fast')
|
||||
model.bonding_mon_rate = bonding.get('mon_rate', '100')
|
||||
model.bonding_up_delay = bonding.get('up_delay', '200')
|
||||
model.bonding_down_delay = bonding.get('down_delay', '200')
|
||||
|
||||
model.mtu = spec.get('mtu', None)
|
||||
model.linkspeed = spec.get('linkspeed', None)
|
||||
|
||||
trunking = spec.get('trunking', {})
|
||||
model.trunk_mode = trunking.get('mode', hd_fields.NetworkLinkTrunkingMode.Disabled)
|
||||
model.native_network = trunking.get('default_network', None)
|
||||
|
||||
models.append(model)
|
||||
else:
|
||||
raise ValueError('Unknown API version of object')
|
||||
elif kind == 'Network':
|
||||
api_version = d.get('apiVersion', '')
|
||||
|
||||
if api_version == "v1.0":
|
||||
model = objects.Network()
|
||||
|
||||
metadata = d.get('metadata', {})
|
||||
spec = d.get('spec', {})
|
||||
|
||||
model.name = metadata.get('name', '')
|
||||
model.site = metadata.get('region', '')
|
||||
|
||||
model.cidr = spec.get('cidr', None)
|
||||
model.allocation_strategy = spec.get('allocation', 'static')
|
||||
model.vlan_id = spec.get('vlan_id', 1)
|
||||
model.mtu = spec.get('mtu', None)
|
||||
|
||||
dns = spec.get('dns', {})
|
||||
model.dns_domain = dns.get('domain', 'local')
|
||||
model.dns_servers = dns.get('servers', None)
|
||||
|
||||
ranges = spec.get('ranges', [])
|
||||
model.ranges = []
|
||||
|
||||
for r in ranges:
|
||||
model.ranges.append({'type': r.get('type', None),
|
||||
'start': r.get('start', None),
|
||||
'end': r.get('end', None),
|
||||
})
|
||||
|
||||
routes = spec.get('routes', [])
|
||||
model.routes = []
|
||||
|
||||
for r in routes:
|
||||
model.routes.append({'subnet': r.get('subnet', None),
|
||||
'gateway': r.get('gateway', None),
|
||||
'metric': r.get('metric', None),
|
||||
})
|
||||
models.append(model)
|
||||
elif kind == 'HardwareProfile':
|
||||
api_version = d.get('apiVersion', '')
|
||||
|
||||
if api_version == 'v1.0':
|
||||
metadata = d.get('metadata', {})
|
||||
spec = d.get('spec', {})
|
||||
|
||||
model = objects.HardwareProfile()
|
||||
|
||||
# Need to add validation logic, we'll assume the input is
|
||||
# valid for now
|
||||
model.name = metadata.get('name', '')
|
||||
model.site = metadata.get('region', '')
|
||||
model.source = hd_fields.ModelSource.Designed
|
||||
|
||||
model.vendor = spec.get('vendor', None)
|
||||
model.generation = spec.get('generation', None)
|
||||
model.hw_version = spec.get('hw_version', None)
|
||||
model.bios_version = spec.get('bios_version', None)
|
||||
model.boot_mode = spec.get('boot_mode', None)
|
||||
model.bootstrap_protocol = spec.get('bootstrap_protocol', None)
|
||||
model.pxe_interface = spec.get('pxe_interface', None)
|
||||
|
||||
model.devices = objects.HardwareDeviceAliasList()
|
||||
|
||||
device_aliases = spec.get('device_aliases', {})
|
||||
|
||||
for d in device_aliases:
|
||||
dev_model = objects.HardwareDeviceAlias()
|
||||
dev_model.source = hd_fields.ModelSource.Designed
|
||||
dev_model.alias = d.get('alias', None)
|
||||
dev_model.bus_type = d.get('bus_type', None)
|
||||
dev_model.dev_type = d.get('dev_type', None)
|
||||
dev_model.address = d.get('address', None)
|
||||
model.devices.append(dev_model)
|
||||
|
||||
models.append(model)
|
||||
elif kind == 'HostProfile' or kind == 'BaremetalNode':
|
||||
api_version = d.get('apiVersion', '')
|
||||
|
||||
if api_version == "v1.0":
|
||||
model = None
|
||||
|
||||
if kind == 'HostProfile':
|
||||
model = objects.HostProfile()
|
||||
else:
|
||||
model = objects.BaremetalNode()
|
||||
|
||||
metadata = d.get('metadata', {})
|
||||
spec = d.get('spec', {})
|
||||
|
||||
model.name = metadata.get('name', '')
|
||||
model.site = metadata.get('region', '')
|
||||
model.source = hd_fields.ModelSource.Designed
|
||||
|
||||
model.parent_profile = spec.get('host_profile', None)
|
||||
model.hardware_profile = spec.get('hardware_profile', None)
|
||||
|
||||
oob = spec.get('oob', {})
|
||||
|
||||
model.oob_type = oob.get('type', None)
|
||||
model.oob_network = oob.get('network', None)
|
||||
model.oob_account = oob.get('account', None)
|
||||
model.oob_credential = oob.get('credential', None)
|
||||
|
||||
storage = spec.get('storage', {})
|
||||
model.storage_layout = storage.get('layout', 'lvm')
|
||||
|
||||
bootdisk = storage.get('bootdisk', {})
|
||||
model.bootdisk_device = bootdisk.get('device', None)
|
||||
model.bootdisk_root_size = bootdisk.get('root_size', None)
|
||||
model.bootdisk_boot_size = bootdisk.get('boot_size', None)
|
||||
|
||||
partitions = storage.get('partitions', [])
|
||||
model.partitions = objects.HostPartitionList()
|
||||
|
||||
for p in partitions:
|
||||
part_model = objects.HostPartition()
|
||||
|
||||
part_model.name = p.get('name', None)
|
||||
part_model.source = hd_fields.ModelSource.Designed
|
||||
part_model.device = p.get('device', None)
|
||||
part_model.part_uuid = p.get('part_uuid', None)
|
||||
part_model.size = p.get('size', None)
|
||||
part_model.mountpoint = p.get('mountpoint', None)
|
||||
part_model.fstype = p.get('fstype', 'ext4')
|
||||
part_model.mount_options = p.get('mount_options', 'defaults')
|
||||
part_model.fs_uuid = p.get('fs_uuid', None)
|
||||
part_model.fs_label = p.get('fs_label', None)
|
||||
|
||||
model.partitions.append(part_model)
|
||||
|
||||
interfaces = spec.get('interfaces', [])
|
||||
model.interfaces = objects.HostInterfaceList()
|
||||
|
||||
for i in interfaces:
|
||||
int_model = objects.HostInterface()
|
||||
|
||||
int_model.device_name = i.get('device_name', None)
|
||||
int_model.network_link = i.get('device_link', None)
|
||||
|
||||
int_model.hardware_slaves = []
|
||||
slaves = i.get('slaves', [])
|
||||
|
||||
for s in slaves:
|
||||
int_model.hardware_slaves.append(s)
|
||||
|
||||
int_model.networks = []
|
||||
networks = i.get('networks', [])
|
||||
|
||||
for n in networks:
|
||||
int_model.networks.append(n)
|
||||
|
||||
model.interfaces.append(int_model)
|
||||
|
||||
node_metadata = spec.get('metadata', {})
|
||||
metadata_tags = node_metadata.get('tags', [])
|
||||
model.tags = []
|
||||
|
||||
for t in metadata_tags:
|
||||
model.tags.append(t)
|
||||
|
||||
owner_data = node_metadata.get('owner_data', {})
|
||||
model.owner_data = {}
|
||||
|
||||
for k, v in owner_data.items():
|
||||
model.owner_data[k] = v
|
||||
|
||||
model.rack = node_metadata.get('rack', None)
|
||||
|
||||
if kind == 'BaremetalNode':
|
||||
addresses = spec.get('addressing', [])
|
||||
|
||||
if len(addresses) == 0:
|
||||
raise ValueError('BaremetalNode needs at least' \
|
||||
' 1 assigned address')
|
||||
|
||||
model.addressing = objects.IpAddressAssignmentList()
|
||||
|
||||
for a in addresses:
|
||||
assignment = objects.IpAddressAssignment()
|
||||
|
||||
address = a.get('address', '')
|
||||
if address == 'dhcp':
|
||||
assignment.type = 'dhcp'
|
||||
assignment.address = None
|
||||
assignment.network = a.get('network')
|
||||
|
||||
model.addressing.append(assignment)
|
||||
elif address != '':
|
||||
assignment.type = 'static'
|
||||
assignment.address = a.get('address')
|
||||
assignment.network = a.get('network')
|
||||
|
||||
model.addressing.append(assignment)
|
||||
else:
|
||||
self.log.error("Invalid address assignment %s on Node %s"
|
||||
% (address, self.name))
|
||||
models.append(model)
|
||||
else:
|
||||
raise ValueError('Unknown API version %s of Kind HostProfile' % (api_version))
|
||||
else:
|
||||
self.log.error(
|
||||
"Error processing document in %s, no kind field"
|
||||
|
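The *List types used above (NodeTagDefinitionList, HardwareDeviceAliasList, HostPartitionList, HostInterfaceList, IpAddressAssignmentList) are not defined in this diff. Below is a plausible sketch of one of them, assuming they follow the standard oslo.versionedobjects list-object convention; the actual committed definitions may differ.

from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import fields as ovo_fields


@ovo_base.VersionedObjectRegistry.register
class NodeTagDefinitionList(ovo_base.ObjectListBase, ovo_base.VersionedObject):
    VERSION = '1.0'

    # ObjectListBase expects the wrapped items in an 'objects' field
    fields = {
        'objects': ovo_fields.ListOfObjectsField('NodeTagDefinition'),
    }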
@ -1,477 +0,0 @@
|
||||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Models for helm_drydock
|
||||
#
|
||||
import logging
|
||||
|
||||
from copy import deepcopy
|
||||
|
||||
from helm_drydock.enum import SiteStatus
|
||||
from helm_drydock.enum import NodeStatus
|
||||
from helm_drydock.model.network import Network
|
||||
from helm_drydock.model.network import NetworkLink
|
||||
from helm_drydock.model import Utils
|
||||
|
||||
class HostProfile(object):
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.log = logging.Logger('model')
|
||||
|
||||
self.api_version = kwargs.get('apiVersion', '')
|
||||
|
||||
if self.api_version == "v1.0":
|
||||
metadata = kwargs.get('metadata', {})
|
||||
spec = kwargs.get('spec', {})
|
||||
|
||||
self.name = metadata.get('name', '')
|
||||
self.site = metadata.get('region', '')
|
||||
|
||||
# Design Data
|
||||
self.design = {}
|
||||
|
||||
self.design['parent_profile'] = spec.get('host_profile', None)
|
||||
self.design['hardware_profile'] = spec.get('hardware_profile', None)
|
||||
|
||||
|
||||
oob = spec.get('oob', {})
|
||||
|
||||
self.design['oob_type'] = oob.get('type', None)
|
||||
self.design['oob_network'] = oob.get('network', None)
|
||||
self.design['oob_account'] = oob.get('account', None)
|
||||
self.design['oob_credential'] = oob.get('credential', None)
|
||||
|
||||
storage = spec.get('storage', {})
|
||||
self.design['storage_layout'] = storage.get('layout', 'lvm')
|
||||
|
||||
bootdisk = storage.get('bootdisk', {})
|
||||
self.design['bootdisk_device'] = bootdisk.get('device', None)
|
||||
self.design['bootdisk_root_size'] = bootdisk.get('root_size', None)
|
||||
self.design['bootdisk_boot_size'] = bootdisk.get('boot_size', None)
|
||||
|
||||
partitions = storage.get('partitions', [])
|
||||
self.design['partitions'] = []
|
||||
|
||||
for p in partitions:
|
||||
self.design['partitions'].append(HostPartition(self.api_version, **p))
|
||||
|
||||
interfaces = spec.get('interfaces', [])
|
||||
self.design['interfaces'] = []
|
||||
|
||||
for i in interfaces:
|
||||
self.design['interfaces'].append(HostInterface(self.api_version, **i))
|
||||
|
||||
node_metadata = spec.get('metadata', {})
|
||||
|
||||
metadata_tags = node_metadata.get('tags', [])
|
||||
self.design['tags'] = []
|
||||
|
||||
for t in metadata_tags:
|
||||
self.design['tags'].append(t)
|
||||
|
||||
owner_data = node_metadata.get('owner_data', {})
|
||||
self.design['owner_data'] = {}
|
||||
|
||||
for k, v in owner_data.items():
|
||||
self.design['owner_data'][k] = v
|
||||
|
||||
self.design['rack'] = node_metadata.get('rack', None)
|
||||
|
||||
else:
|
||||
self.log.error("Unknown API version %s of %s" %
|
||||
(self.api_version, self.__class__))
|
||||
raise ValueError('Unknown API version of object')
|
||||
|
||||
def get_rack(self):
|
||||
if getattr(self, 'applied', None) is not None:
|
||||
return self.applied.get('rack', None)
|
||||
else:
|
||||
return self.design.get('rack', None)
|
||||
|
||||
def get_name(self):
|
||||
return self.name
|
||||
|
||||
def has_tag(self, tag):
|
||||
if getattr(self, 'applied', None) is not None:
|
||||
if tag in self.applied.get('tags', []):
|
||||
return True
|
||||
else:
|
||||
if tag in self.design.get('tags', []):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def apply_inheritance(self, site):
|
||||
# No parent to inherit from, just apply design values
|
||||
# and return
|
||||
if self.design['parent_profile'] is None:
|
||||
self.applied = deepcopy(self.design)
|
||||
return
|
||||
|
||||
parent = site.get_host_profile(self.design['parent_profile'])
|
||||
|
||||
if parent is None:
|
||||
raise NameError("Cannot find parent profile %s for %s"
|
||||
% (self.design['parent_profile'], self.name))
|
||||
|
||||
parent.apply_inheritance(site)
|
||||
|
||||
# First compute inheritance for simple fields
|
||||
inheritable_field_list = [
|
||||
"hardware_profile", "oob_type", "oob_network",
|
||||
"oob_credential", "oob_account", "storage_layout",
|
||||
"bootdisk_device", "bootdisk_root_size", "bootdisk_boot_size",
|
||||
"rack"]
|
||||
|
||||
# Create applied data from self design values and parent
|
||||
# applied values
|
||||
|
||||
self.applied = {}
|
||||
|
||||
for f in inheritable_field_list:
|
||||
self.applied[f] = Utils.apply_field_inheritance(
|
||||
self.design.get(f, None),
|
||||
parent.applied.get(f, None))
|
||||
|
||||
# Now compute inheritance for complex types
|
||||
self.applied['tags'] = Utils.merge_lists(self.design['tags'],
|
||||
parent.applied['tags'])
|
||||
|
||||
self.applied['owner_data'] = Utils.merge_dicts(
|
||||
self.design['owner_data'], parent.applied['owner_data'])
|
||||
|
||||
self.applied['interfaces'] = HostInterface.merge_lists(
|
||||
self.design['interfaces'], parent.applied['interfaces'])
|
||||
|
||||
self.applied['partitions'] = HostPartition.merge_lists(
|
||||
self.design['partitions'], parent.applied['partitions'])
|
||||
|
||||
return
|
||||
|
||||
|
||||
class HostInterface(object):
|
||||
|
||||
def __init__(self, api_version, **kwargs):
|
||||
self.log = logging.Logger('model')
|
||||
|
||||
self.api_version = api_version
|
||||
|
||||
if self.api_version == "v1.0":
|
||||
self.device_name = kwargs.get('device_name', None)
|
||||
|
||||
self.design = {}
|
||||
self.design['network_link'] = kwargs.get('device_link', None)
|
||||
|
||||
self.design['hardware_slaves'] = []
|
||||
slaves = kwargs.get('slaves', [])
|
||||
|
||||
for s in slaves:
|
||||
self.design['hardware_slaves'].append(s)
|
||||
|
||||
self.design['networks'] = []
|
||||
networks = kwargs.get('networks', [])
|
||||
|
||||
for n in networks:
|
||||
self.design['networks'].append(n)
|
||||
else:
|
||||
self.log.error("Unknown API version %s of %s" %
|
||||
(self.api_version, self.__class__))
|
||||
raise ValueError('Unknown API version of object')
|
||||
|
||||
# Ensure applied_data exists
|
||||
def ensure_applied_data(self):
|
||||
if getattr(self, 'applied', None) is None:
|
||||
self.applied = deepcopy(self.design)
|
||||
|
||||
return
|
||||
|
||||
def get_name(self):
|
||||
return self.device_name
|
||||
|
||||
def get_applied_hw_slaves(self):
|
||||
self.ensure_applied_data()
|
||||
|
||||
return self.applied.get('hardware_slaves', [])
|
||||
|
||||
def get_applied_slave_selectors(self):
|
||||
self.ensure_applied_data()
|
||||
|
||||
return self.applied.get('selectors', None)
|
||||
|
||||
# Return number of slaves for this interface
|
||||
def get_applied_slave_count(self):
|
||||
self.ensure_applied_data()
|
||||
|
||||
return len(self.applied.get('hardware_slaves', []))
|
||||
|
||||
def get_network_configs(self):
|
||||
self.ensure_applied_data()
|
||||
return self.applied.get('attached_networks', [])
|
||||
|
||||
# The device attribute may be hardware alias that translates to a
|
||||
# physical device address. If the device attribute does not match an
|
||||
# alias, we assume it directly identifies a OS device name. When the
|
||||
# apply_hardware_profile method is called on the parent Node of this
|
||||
# device, the selector will be decided and applied
|
||||
|
||||
def add_selector(self, sel_type, address='', dev_type=''):
|
||||
self.ensure_applied_data()
|
||||
|
||||
if self.applied.get('selectors', None) is None:
|
||||
self.applied['selectors'] = []
|
||||
|
||||
new_selector = {}
|
||||
new_selector['selector_type'] = sel_type
|
||||
new_selector['address'] = address
|
||||
new_selector['device_type'] = dev_type
|
||||
|
||||
self.applied['selectors'].append(new_selector)
|
||||
|
||||
def apply_link_config(self, net_link):
|
||||
if (net_link is not None and
|
||||
isinstance(net_link, NetworkLink) and
|
||||
net_link.name == self.design.get('network_link', '')):
|
||||
|
||||
self.ensure_applied_data()
|
||||
|
||||
self.applied['attached_link'] = deepcopy(net_link)
|
||||
return True
|
||||
return False
|
||||
|
||||
def apply_network_config(self, network):
|
||||
if network.name in self.design['networks']:
|
||||
self.ensure_applied_data()
|
||||
if self.applied.get('attached_networks', None) is None:
|
||||
self.applied['attached_networks'] = []
|
||||
self.applied['attached_networks'].append(deepcopy(network))
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def set_network_address(self, network_name, address):
|
||||
self.ensure_applied_data()
|
||||
|
||||
if self.applied.get('attached_networks', None) is None:
|
||||
return False
|
||||
|
||||
for n in self.applied.get('attached_networks', []):
|
||||
if n.name == network_name:
|
||||
setattr(n, 'assigned_address', address)
|
||||
|
||||
"""
|
||||
Merge two lists of HostInterface models with child_list taking
|
||||
priority when conflicts. If a member of child_list has a device_name
|
||||
beginning with '!' it indicates that HostInterface should be
|
||||
removed from the merged list
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def merge_lists(child_list, parent_list):
|
||||
effective_list = []
|
||||
|
||||
if len(child_list) == 0 and len(parent_list) > 0:
|
||||
for p in parent_list:
|
||||
pp = deepcopy(p)
|
||||
pp.ensure_applied_data()
|
||||
effective_list.append(pp)
|
||||
elif len(parent_list) == 0 and len(child_list) > 0:
|
||||
for i in child_list:
|
||||
if i.get_name().startswith('!'):
|
||||
continue
|
||||
else:
|
||||
ii = deepcopy(i)
|
||||
ii.ensure_applied_data()
|
||||
effective_list.append(ii)
|
||||
elif len(parent_list) > 0 and len(child_list) > 0:
|
||||
parent_interfaces = []
|
||||
for i in parent_list:
|
||||
parent_name = i.get_name()
|
||||
parent_interfaces.append(parent_name)
|
||||
add = True
|
||||
for j in child_list:
|
||||
if j.get_name() == ("!" + parent_name):
|
||||
add = False
|
||||
break
|
||||
elif j.device_name == parent_name:
|
||||
m = HostInterface(j.api_version)
|
||||
m.device_name = j.get_name()
|
||||
m.design['network_link'] = \
|
||||
Utils.apply_field_inheritance(
|
||||
j.design.get('network_link', None),
|
||||
i.applied.get('network_link', None))
|
||||
|
||||
s = [x for x
|
||||
in i.applied.get('hardware_slaves', [])
|
||||
if ("!" + x) not in j.design.get(
|
||||
'hardware_slaves', [])]
|
||||
|
||||
s.extend(
|
||||
[x for x
|
||||
in j.design.get('hardware_slaves', [])
|
||||
if not x.startswith("!")])
|
||||
|
||||
m.design['hardware_slaves'] = s
|
||||
|
||||
n = [x for x
|
||||
in i.applied.get('networks',[])
|
||||
if ("!" + x) not in j.design.get(
|
||||
'networks', [])]
|
||||
|
||||
n.extend(
|
||||
[x for x
|
||||
in j.design.get('networks', [])
|
||||
if not x.startswith("!")])
|
||||
|
||||
m.design['networks'] = n
|
||||
m.ensure_applied_data()
|
||||
|
||||
effective_list.append(m)
|
||||
add = False
|
||||
break
|
||||
|
||||
if add:
|
||||
ii = deepcopy(i)
|
||||
ii.ensure_applied_data()
|
||||
effective_list.append(ii)
|
||||
|
||||
for j in child_list:
|
||||
if (j.device_name not in parent_interfaces
|
||||
and not j.device_name.startswith("!")):
|
||||
jj = deepcopy(j)
|
||||
jj.ensure_applied_data()
|
||||
effective_list.append(jj)
|
||||
|
||||
return effective_list
|
||||
|
||||
|
||||
class HostPartition(object):
|
||||
|
||||
def __init__(self, api_version, **kwargs):
|
||||
self.api_version = api_version
|
||||
|
||||
if self.api_version == "v1.0":
|
||||
self.name = kwargs.get('name', None)
|
||||
|
||||
self.design = {}
|
||||
self.design['device'] = kwargs.get('device', None)
|
||||
self.design['part_uuid'] = kwargs.get('part_uuid', None)
|
||||
self.design['size'] = kwargs.get('size', None)
|
||||
self.design['mountpoint'] = kwargs.get('mountpoint', None)
|
||||
self.design['fstype'] = kwargs.get('fstype', 'ext4')
|
||||
self.design['mount_options'] = kwargs.get('mount_options', 'defaults')
|
||||
self.design['fs_uuid'] = kwargs.get('fs_uuid', None)
|
||||
self.design['fs_label'] = kwargs.get('fs_label', None)
|
||||
|
||||
self.applied = kwargs.get('applied', None)
|
||||
self.build = kwargs.get('build', None)
|
||||
else:
|
||||
raise ValueError('Unknown API version of object')
|
||||
|
||||
# Ensure applied_data exists
|
||||
def ensure_applied_data(self):
|
||||
if getattr(self, 'applied', None) is None:
|
||||
self.applied = deepcopy(self.design)
|
||||
|
||||
return
|
||||
|
||||
def get_applied_device(self):
|
||||
self.ensure_applied_data()
|
||||
|
||||
return self.applied.get('device', '')
|
||||
|
||||
def get_name(self):
|
||||
return self.name
|
||||
|
||||
# The device attribute may be hardware alias that translates to a
|
||||
# physical device address. If the device attribute does not match an
|
||||
# alias, we assume it directly identifies a OS device name. When the
|
||||
# apply_hardware_profile method is called on the parent Node of this
|
||||
# device, the selector will be decided and applied
|
||||
|
||||
def set_selector(self, sel_type, address='', dev_type=''):
|
||||
self.ensure_applied_data()
|
||||
|
||||
selector = {}
|
||||
selector['type'] = sel_type
|
||||
selector['address'] = address
|
||||
selector['device_type'] = dev_type
|
||||
|
||||
self.applied['selector'] = selector
|
||||
|
||||
def get_selector(self):
|
||||
self.ensure_applied_data()
|
||||
return self.applied.get('selector', None)
|
||||
|
||||
"""
|
||||
Merge two lists of HostPartition models with child_list taking
|
||||
priority when conflicts. If a member of child_list has a name
|
||||
beginning with '!' it indicates that HostPartition should be
|
||||
removed from the merged list
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def merge_lists(child_list, parent_list):
|
||||
effective_list = []
|
||||
|
||||
if len(child_list) == 0 and len(parent_list) > 0:
|
||||
for p in parent_list:
|
||||
pp = deepcopy(p)
|
||||
pp.ensure_applied_data()
|
||||
effective_list.append(pp)
|
||||
elif len(parent_list) == 0 and len(child_list) > 0:
|
||||
for i in child_list:
|
||||
if i.get_name().startswith('!'):
|
||||
continue
|
||||
else:
|
||||
ii = deepcopy(i)
|
||||
ii.ensure_applied_data()
|
||||
effective_list.append(ii)
|
||||
elif len(parent_list) > 0 and len(child_list) > 0:
|
||||
inherit_field_list = ["device", "part_uuid", "size",
|
||||
"mountpoint", "fstype", "mount_options",
|
||||
"fs_uuid", "fs_label"]
|
||||
parent_partitions = []
|
||||
for i in parent_list:
|
||||
parent_name = i.get_name()
|
||||
parent_partitions.append(parent_name)
|
||||
add = True
|
||||
for j in child_list:
|
||||
if j.get_name() == ("!" + parent_name):
|
||||
add = False
|
||||
break
|
||||
elif j.get_name() == parent_name:
|
||||
p = HostPartition(j.api_version)
|
||||
p.name = j.get_name()
|
||||
|
||||
for f in inherit_field_list:
|
||||
j_f = j.design.get(f, None)
|
||||
i_f = i.applied.get(f, None)
|
||||
p.design.set(p,
|
||||
Utils.apply_field_inheritance(j_f, i_f))
|
||||
add = False
|
||||
p.ensure_applied_data()
|
||||
effective_list.append(p)
|
||||
if add:
|
||||
ii = deepcopy(i)
|
||||
ii.ensure_applied_data()
|
||||
effective_list.append(ii)
|
||||
|
||||
for j in child_list:
|
||||
if (j.get_name() not in parent_list and
|
||||
not j.get_name().startswith("!")):
|
||||
jj = deepcopy(j)
|
||||
jj.ensure_applied_data
|
||||
effective_list.append(jj)
|
||||
|
||||
return effective_list
|
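The merge_lists helpers in the deleted model above implement a '!'-prefix override convention for inheritance. A small standalone illustration of that rule (example interface names only, not from the commit):

# Child entries prefixed with '!' remove the matching parent entry from the
# merged result; all other child entries are added alongside the parent's.
parent_names = ['eth0', 'eth1']
child_names = ['!eth1', 'eth2']

merged = [n for n in parent_names if ('!' + n) not in child_names]
merged.extend(n for n in child_names if not n.startswith('!'))

assert merged == ['eth0', 'eth2']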
@ -1,98 +0,0 @@
|
||||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Models for helm_drydock
|
||||
#
|
||||
import logging
|
||||
|
||||
from copy import deepcopy
|
||||
|
||||
from helm_drydock.enum import SiteStatus
|
||||
from helm_drydock.enum import NodeStatus
|
||||
|
||||
class HardwareProfile(object):
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.log = logging.Logger('model')
|
||||
|
||||
self.api_version = kwargs.get('apiVersion', '')
|
||||
|
||||
if self.api_version == "v1.0":
|
||||
metadata = kwargs.get('metadata', {})
|
||||
spec = kwargs.get('spec', {})
|
||||
|
||||
# Need to add validation logic, we'll assume the input is
|
||||
# valid for now
|
||||
self.name = metadata.get('name', '')
|
||||
self.site = metadata.get('region', '')
|
||||
|
||||
self.vendor = spec.get('vendor', None)
|
||||
self.generation = spec.get('generation', None)
|
||||
self.hw_version = spec.get('hw_version', None)
|
||||
self.bios_version = spec.get('bios_version', None)
|
||||
self.boot_mode = spec.get('boot_mode', None)
|
||||
self.bootstrap_protocol = spec.get('bootstrap_protocol', None)
|
||||
self.pxe_interface = spec.get('pxe_interface', None)
|
||||
self.devices = []
|
||||
|
||||
device_aliases = spec.get('device_aliases', {})
|
||||
|
||||
pci_devices = device_aliases.get('pci', [])
|
||||
scsi_devices = device_aliases.get('scsi', [])
|
||||
|
||||
for d in pci_devices:
|
||||
d['bus_type'] = 'pci'
|
||||
self.devices.append(
|
||||
HardwareDeviceAlias(self.api_version, **d))
|
||||
|
||||
for d in scsi_devices:
|
||||
d['bus_type'] = 'scsi'
|
||||
self.devices.append(
|
||||
HardwareDeviceAlias(self.api_version, **d))
|
||||
else:
|
||||
self.log.error("Unknown API version %s of %s" %
|
||||
(self.api_version, self.__class__))
|
||||
raise ValueError('Unknown API version of object')
|
||||
|
||||
return
|
||||
|
||||
def get_name(self):
|
||||
return self.name
|
||||
|
||||
def resolve_alias(self, alias_type, alias):
|
||||
selector = {}
|
||||
for d in self.devices:
|
||||
if d.alias == alias and d.bus_type == alias_type:
|
||||
selector['address'] = d.address
|
||||
selector['device_type'] = d.type
|
||||
return selector
|
||||
|
||||
return None
|
||||
|
||||
class HardwareDeviceAlias(object):
|
||||
|
||||
def __init__(self, api_version, **kwargs):
|
||||
self.log = logging.Logger('model')
|
||||
|
||||
self.api_version = api_version
|
||||
|
||||
if self.api_version == "v1.0":
|
||||
self.bus_type = kwargs.get('bus_type', None)
|
||||
self.address = kwargs.get('address', None)
|
||||
self.alias = kwargs.get('alias', None)
|
||||
self.type = kwargs.get('type', None)
|
||||
else:
|
||||
self.log.error("Unknown API version %s of %s" %
|
||||
(self.api_version, self.__class__))
|
||||
raise ValueError('Unknown API version of object')
|
@ -1,138 +0,0 @@
|
||||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Models for helm_drydock
|
||||
#
|
||||
import logging
|
||||
|
||||
from copy import deepcopy
|
||||
|
||||
from helm_drydock.enum import SiteStatus
|
||||
from helm_drydock.enum import NodeStatus
|
||||
|
||||
class NetworkLink(object):
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.log = logging.Logger('model')
|
||||
|
||||
self.api_version = kwargs.get('apiVersion', '')
|
||||
|
||||
if self.api_version == "v1.0":
|
||||
metadata = kwargs.get('metadata', {})
|
||||
spec = kwargs.get('spec', {})
|
||||
|
||||
self.name = metadata.get('name', '')
|
||||
self.site = metadata.get('region', '')
|
||||
|
||||
bonding = spec.get('bonding', {})
|
||||
self.bonding_mode = bonding.get('mode', 'none')
|
||||
|
||||
# How should we define defaults for CIs not in the input?
|
||||
if self.bonding_mode == '802.3ad':
|
||||
self.bonding_xmit_hash = bonding.get('hash', 'layer3+4')
|
||||
self.bonding_peer_rate = bonding.get('peer_rate', 'fast')
|
||||
self.bonding_mon_rate = bonding.get('mon_rate', '100')
|
||||
self.bonding_up_delay = bonding.get('up_delay', '200')
|
||||
self.bonding_down_delay = bonding.get('down_delay', '200')
|
||||
|
||||
self.mtu = spec.get('mtu', 1500)
|
||||
self.linkspeed = spec.get('linkspeed', 'auto')
|
||||
|
||||
trunking = spec.get('trunking', {})
|
||||
self.trunk_mode = trunking.get('mode', 'none')
|
||||
|
||||
self.native_network = spec.get('default_network', '')
|
||||
else:
|
||||
self.log.error("Unknown API version %s of %s" %
|
||||
(self.api_version, self.__class__))
|
||||
raise ValueError('Unknown API version of object')
|
||||
|
||||
def get_name(self):
|
||||
return self.name
|
||||
|
||||
class Network(object):
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.log = logging.Logger('model')
|
||||
|
||||
self.api_version = kwargs.get('apiVersion', '')
|
||||
|
||||
if self.api_version == "v1.0":
|
||||
metadata = kwargs.get('metadata', {})
|
||||
spec = kwargs.get('spec', {})
|
||||
|
||||
self.name = metadata.get('name', '')
|
||||
self.site = metadata.get('region', '')
|
||||
|
||||
self.cidr = spec.get('cidr', None)
|
||||
self.allocation_strategy = spec.get('allocation', 'static')
|
||||
self.vlan_id = spec.get('vlan_id', 1)
|
||||
self.mtu = spec.get('mtu', 0)
|
||||
|
||||
dns = spec.get('dns', {})
|
||||
self.dns_domain = dns.get('domain', 'local')
|
||||
self.dns_servers = dns.get('servers', None)
|
||||
|
||||
ranges = spec.get('ranges', [])
|
||||
self.ranges = []
|
||||
|
||||
for r in ranges:
|
||||
self.ranges.append(NetworkAddressRange(self.api_version, **r))
|
||||
|
||||
routes = spec.get('routes', [])
|
||||
self.routes = []
|
||||
|
||||
for r in routes:
|
||||
self.routes.append(NetworkRoute(self.api_version, **r))
|
||||
else:
|
||||
self.log.error("Unknown API version %s of %s" %
|
||||
(self.api_version, self.__class__))
|
||||
raise ValueError('Unknown API version of object')
|
||||
|
||||
def get_name(self):
|
||||
return self.name
|
||||
|
||||
|
||||
class NetworkAddressRange(object):
|
||||
|
||||
def __init__(self, api_version, **kwargs):
|
||||
self.log = logging.Logger('model')
|
||||
|
||||
self.api_version = api_version
|
||||
|
||||
if self.api_version == "v1.0":
|
||||
self.type = kwargs.get('type', None)
|
||||
self.start = kwargs.get('start', None)
|
||||
self.end = kwargs.get('end', None)
|
||||
else:
|
||||
self.log.error("Unknown API version %s of %s" %
|
||||
(self.api_version, self.__class__))
|
||||
raise ValueError('Unknown API version of object')
|
||||
|
||||
|
||||
class NetworkRoute(object):
|
||||
|
||||
def __init__(self, api_version, **kwargs):
|
||||
self.log = logging.Logger('model')
|
||||
|
||||
self.api_version = api_version
|
||||
|
||||
if self.api_version == "v1.0":
|
||||
self.type = kwargs.get('subnet', None)
|
||||
self.start = kwargs.get('gateway', None)
|
||||
self.end = kwargs.get('metric', 100)
|
||||
else:
|
||||
self.log.error("Unknown API version %s of %s" %
|
||||
(self.api_version, self.__class__))
|
||||
raise ValueError('Unknown API version of object')
|
@ -1,187 +0,0 @@
|
||||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Models for helm_drydock
|
||||
#
|
||||
import logging
|
||||
|
||||
from copy import deepcopy
|
||||
|
||||
|
||||
from helm_drydock.enum import SiteStatus
|
||||
from helm_drydock.enum import NodeStatus
|
||||
from helm_drydock.model.hostprofile import HostProfile
|
||||
from helm_drydock.model import Utils
|
||||
|
||||
class BaremetalNode(HostProfile):
|
||||
|
||||
# A BaremetalNode is really nothing more than a physical
|
||||
# instantiation of a HostProfile, so they both represent
|
||||
# the same set of CIs
|
||||
def __init__(self, **kwargs):
|
||||
super(BaremetalNode, self).__init__(**kwargs)
|
||||
|
||||
if self.api_version == "v1.0":
|
||||
addressing = []
|
||||
|
||||
self.design['addressing'] = addressing
|
||||
|
||||
spec = kwargs.get('spec', {})
|
||||
addresses = spec.get('addressing', [])
|
||||
|
||||
if len(addresses) == 0:
|
||||
raise ValueError('BaremetalNode needs at least' \
|
||||
' 1 assigned address')
|
||||
for a in addresses:
|
||||
assignment = {}
|
||||
address = a.get('address', '')
|
||||
if address == 'dhcp':
|
||||
assignment['type'] = 'dhcp'
|
||||
assignment['address'] = None
|
||||
assignment['network'] = a.get('network')
|
||||
addressing.append(assignment)
|
||||
elif address != '':
|
||||
assignment['type'] = 'static'
|
||||
assignment['address'] = a.get('address')
|
||||
assignment['network'] = a.get('network')
|
||||
addressing.append(assignment)
|
||||
else:
|
||||
self.log.error("Invalid address assignment %s on Node %s"
|
||||
% (address, self.name))
|
||||
|
||||
self.applied = kwargs.get('applied_data', None)
|
||||
self.build = kwargs.get('build', None)
|
||||
|
||||
# Compile the applied version of this model sourcing referenced
|
||||
# data from the passed site design
|
||||
def compile_applied_model(self, site):
|
||||
self.apply_host_profile(site)
|
||||
|
||||
self.applied['addressing'] = deepcopy(self.design['addressing'])
|
||||
|
||||
self.apply_hardware_profile(site)
|
||||
self.apply_network_connections(site)
|
||||
return
|
||||
|
||||
def init_build(self):
|
||||
if self.build is None:
|
||||
self.build = {}
|
||||
self.build['status'] = NodeStatus.Unknown
|
||||
|
||||
def apply_host_profile(self, site):
|
||||
self.apply_inheritance(site)
|
||||
return
|
||||
|
||||
# Translate device alises to physical selectors and copy
|
||||
# other hardware attributes into this object
|
||||
def apply_hardware_profile(self, site):
|
||||
if self.applied['hardware_profile'] is None:
|
||||
raise ValueError("Hardware profile not set")
|
||||
|
||||
hw_profile = site.get_hardware_profile(
|
||||
self.applied['hardware_profile'])
|
||||
|
||||
for i in self.applied.get('interfaces', []):
|
||||
for s in i.get_applied_hw_slaves():
|
||||
selector = hw_profile.resolve_alias("pci", s)
|
||||
if selector is None:
|
||||
i.add_selector("name", address=s)
|
||||
else:
|
||||
i.add_selector("address", address=selector['address'],
|
||||
dev_type=selector['device_type'])
|
||||
|
||||
for p in self.applied.get('partitions', []):
|
||||
selector = hw_profile.resolve_alias("scsi",
|
||||
p.get_applied_device())
|
||||
if selector is None:
|
||||
p.set_selector("name",
|
||||
address=p.get_applied_device())
|
||||
else:
|
||||
p.set_selector("address", address=selector['address'],
|
||||
dev_type=selector['device_type'])
|
||||
|
||||
|
||||
hardware = {"vendor": getattr(hw_profile, 'vendor', None),
|
||||
"generation": getattr(hw_profile, 'generation', None),
|
||||
"hw_version": getattr(hw_profile, 'hw_version', None),
|
||||
"bios_version": getattr(hw_profile, 'bios_version', None),
|
||||
"boot_mode": getattr(hw_profile, 'boot_mode', None),
|
||||
"bootstrap_protocol": getattr(hw_profile,
|
||||
'bootstrap_protocol',
|
||||
None),
|
||||
"pxe_interface": getattr(hw_profile, 'pxe_interface', None)
|
||||
}
|
||||
|
||||
self.applied['hardware'] = hardware
|
||||
|
||||
return
|
||||
|
||||
def apply_network_connections(self, site):
|
||||
for n in site.network_links:
|
||||
for i in self.applied.get('interfaces', []):
|
||||
i.apply_link_config(n)
|
||||
|
||||
for n in site.networks:
|
||||
for i in self.applied.get('interfaces', []):
|
||||
i.apply_network_config(n)
|
||||
|
||||
for a in self.applied.get('addressing', []):
|
||||
for i in self.applied.get('interfaces', []):
|
||||
i.set_network_address(a.get('network'), a.get('address'))
|
||||
|
||||
return
|
||||
|
||||
def get_applied_interface(self, iface_name):
|
||||
if getattr(self, 'applied', None) is not None:
|
||||
for i in self.applied.get('interfaces', []):
|
||||
if i.get_name() == iface_name:
|
||||
return i
|
||||
|
||||
return None
|
||||
|
||||
def get_status(self):
|
||||
self.init_build()
|
||||
return self.build.get('status', NodeStatus.Unknown)
|
||||
|
||||
def set_status(self, status):
|
||||
if isinstance(status, NodeStatus):
|
||||
self.init_build()
|
||||
self.build['status'] = status
|
||||
|
||||
def get_last_build_action(self):
|
||||
if getattr(self, 'build', None) is None:
|
||||
return None
|
||||
|
||||
return self.build.get('last_action', None)
|
||||
|
||||
def set_last_build_action(self, action, result, detail=None):
|
||||
self.init_build()
|
||||
last_action = self.build.get('last_action', None)
|
||||
if last_action is None:
|
||||
self.build['last_action'] = {}
|
||||
last_action = self.build['last_action']
|
||||
last_action['action'] = action
|
||||
last_action['result'] = result
|
||||
if detail is not None:
|
||||
last_action['detail'] = detail
|
||||
|
||||
def get_network_address(self, network_name):
|
||||
if self.applied is None:
|
||||
return None
|
||||
|
||||
for a in self.applied.get('addressing', []):
|
||||
if a.get('network', None) == network_name:
|
||||
return a.get('address', None)
|
||||
|
||||
return None
|
@ -1,125 +0,0 @@
|
||||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Models for helm_drydock
|
||||
#
|
||||
import logging
|
||||
|
||||
from copy import deepcopy
|
||||
|
||||
from helm_drydock.enum import SiteStatus
|
||||
from helm_drydock.enum import NodeStatus
|
||||
|
||||
class Site(object):
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.log = logging.Logger('model')
|
||||
|
||||
if kwargs is None:
|
||||
raise ValueError("Empty arguments")
|
||||
|
||||
self.api_version = kwargs.get('apiVersion', '')
|
||||
|
||||
self.build = kwargs.get('build', {})
|
||||
|
||||
if self.api_version == "v1.0":
|
||||
metadata = kwargs.get('metadata', {})
|
||||
|
||||
# Need to add validation logic, we'll assume the input is
|
||||
# valid for now
|
||||
self.name = metadata.get('name', '')
|
||||
|
||||
spec = kwargs.get('spec', {})
|
||||
|
||||
self.tag_definitions = []
|
||||
tag_defs = spec.get('tag_definitions', [])
|
||||
|
||||
for t in tag_defs:
|
||||
self.tag_definitions.append(
|
||||
NodeTagDefinition(self.api_version, **t))
|
||||
|
||||
self.networks = []
|
||||
self.network_links = []
|
||||
self.host_profiles = []
|
||||
self.hardware_profiles = []
|
||||
self.baremetal_nodes = []
|
||||
|
||||
else:
|
||||
self.log.error("Unknown API version %s of %s" %
|
||||
(self.api_version, self.__class__))
|
||||
raise ValueError('Unknown API version of object')
|
||||
|
||||
def get_name(self):
|
||||
return self.name
|
||||
|
||||
def start_build(self):
|
||||
if self.build.get('status', '') == '':
|
||||
self.build['status'] = SiteStatus.Unknown
|
||||
|
||||
def get_network(self, network_name):
|
||||
for n in self.networks:
|
||||
if n.name == network_name:
|
||||
return n
|
||||
|
||||
return None
|
||||
|
||||
def get_network_link(self, link_name):
|
||||
for l in self.network_links:
|
||||
if l.name == link_name:
|
||||
return l
|
||||
|
||||
return None
|
||||
|
||||
def get_host_profile(self, profile_name):
|
||||
for p in self.host_profiles:
|
||||
if p.name == profile_name:
|
||||
return p
|
||||
|
||||
return None
|
||||
|
||||
def get_hardware_profile(self, profile_name):
|
||||
for p in self.hardware_profiles:
|
||||
if p.name == profile_name:
|
||||
return p
|
||||
|
||||
return None
|
||||
|
||||
def get_baremetal_node(self, node_name):
|
||||
for n in self.baremetal_nodes:
|
||||
if n.name == node_name:
|
||||
return n
|
||||
|
||||
return None
|
||||
|
||||
def set_status(self, status):
|
||||
if isinstance(status, SiteStatus):
|
||||
self.build['status'] = status
|
||||
|
||||
class NodeTagDefinition(object):
|
||||
|
||||
def __init__(self, api_version, **kwargs):
|
||||
self.api_version = api_version
|
||||
|
||||
if self.api_version == "v1.0":
|
||||
self.tag = kwargs.get('tag', '')
|
||||
self.definition_type = kwargs.get('definition_type', '')
|
||||
self.definition = kwargs.get('definition', '')
|
||||
|
||||
if self.definition_type not in ['lshw_xpath']:
|
||||
raise ValueError('Unknown definition type in ' \
|
||||
'NodeTagDefinition: %s' % (self.definition_type))
|
||||
else:
|
||||
self.log.error("Unknown API version %s of %s" %
|
||||
(self.api_version, self.__class__))
|
||||
raise ValueError('Unknown API version of object')
|
@ -18,6 +18,17 @@ import logging
|
||||
|
||||
from copy import deepcopy
|
||||
|
||||
|
||||
def register_all():
|
||||
# NOTE(sh8121att) - Import all versioned objects so
|
||||
# they are available via RPC. Any new object definitions
|
||||
# need to be added here.
|
||||
__import__('helm_drydock.objects.network')
|
||||
__import__('helm_drydock.objects.node')
|
||||
__import__('helm_drydock.objects.hostprofile')
|
||||
__import__('helm_drydock.objects.hwprofile')
|
||||
__import__('helm_drydock.objects.site')
|
||||
|
||||
# Utility class for calculating inheritance
|
||||
|
||||
class Utils(object):
|
||||
@ -74,18 +85,18 @@ class Utils(object):
|
||||
@staticmethod
|
||||
def merge_lists(child_list, parent_list):
|
||||
|
||||
if type(child_list) is not list or type(parent_list) is not list:
|
||||
raise ValueError("One parameter is not a list")
|
||||
|
||||
effective_list = []
|
||||
|
||||
# Probably should handle non-string values
|
||||
effective_list.extend(
|
||||
filter(lambda x: not x.startswith("!"), child_list))
|
||||
try:
|
||||
# Probably should handle non-string values
|
||||
effective_list.extend(
|
||||
filter(lambda x: not x.startswith("!"), child_list))
|
||||
|
||||
effective_list.extend(
|
||||
filter(lambda x: ("!" + x) not in child_list,
|
||||
filter(lambda x: x not in effective_list, parent_list)))
|
||||
effective_list.extend(
|
||||
filter(lambda x: ("!" + x) not in child_list,
|
||||
filter(lambda x: x not in effective_list, parent_list)))
|
||||
except TypeError:
|
||||
raise TypeError("Error iterating list argument")
|
||||
|
||||
return effective_list
|
||||
|
||||
@ -107,21 +118,21 @@ class Utils(object):
|
||||
@staticmethod
|
||||
def merge_dicts(child_dict, parent_dict):
|
||||
|
||||
if type(child_dict) is not dict or type(parent_dict) is not dict:
|
||||
raise ValueError("One parameter is not a dict")
|
||||
|
||||
effective_dict = {}
|
||||
|
||||
# Probably should handle non-string keys
|
||||
use_keys = filter(lambda x: ("!" + x) not in child_dict.keys(),
|
||||
parent_dict)
|
||||
try:
|
||||
# Probably should handle non-string keys
|
||||
use_keys = filter(lambda x: ("!" + x) not in child_dict.keys(),
|
||||
parent_dict)
|
||||
|
||||
for k in use_keys:
|
||||
effective_dict[k] = deepcopy(parent_dict[k])
|
||||
for k in use_keys:
|
||||
effective_dict[k] = deepcopy(parent_dict[k])
|
||||
|
||||
use_keys = filter(lambda x: not x.startswith("!"), child_dict)
|
||||
|
||||
for k in use_keys:
|
||||
effective_dict[k] = deepcopy(child_dict[k])
|
||||
use_keys = filter(lambda x: not x.startswith("!"), child_dict)
|
||||
|
||||
for k in use_keys:
|
||||
effective_dict[k] = deepcopy(child_dict[k])
|
||||
except TypeError:
|
||||
raise TypeError("Error iterating dict argument")
|
||||
|
||||
return effective_dict
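As a quick illustration of the "!" exclusion convention used by merge_lists and merge_dicts, here is a minimal, hypothetical sketch (the input values are illustrative only):

    from helm_drydock.objects import Utils

    child_tags = ['compute', '!storage']
    parent_tags = ['storage', 'monitoring']
    # the child's '!storage' entry suppresses the parent's 'storage' tag
    assert Utils.merge_lists(child_tags, parent_tags) == ['compute', 'monitoring']

    child_data = {'rack': 'r1', '!owner': ''}
    parent_data = {'owner': 'ops', 'dc': 'dc1'}
    # '!owner' drops the parent's 'owner' key; everything else merges through
    assert Utils.merge_dicts(child_data, parent_data) == {'rack': 'r1', 'dc': 'dc1'}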
|
helm_drydock/objects/base.py (new file, 68 lines)
@ -0,0 +1,68 @@
|
||||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from oslo_versionedobjects import base
|
||||
from oslo_versionedobjects import fields as obj_fields
|
||||
|
||||
import helm_drydock.objects as objects
|
||||
|
||||
class DrydockObjectRegistry(base.VersionedObjectRegistry):
|
||||
|
||||
# Steal this from Cinder to bring all registered objects
|
||||
# into the helm_drydock.objects namespace
|
||||
|
||||
def registration_hook(self, cls, index):
|
||||
setattr(objects, cls.obj_name(), cls)
|
||||
|
||||
class DrydockObject(base.VersionedObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
OBJ_PROJECT_NAMESPACE = 'helm_drydock.objects'
|
||||
|
||||
class DrydockPersistentObject(base.VersionedObject):
|
||||
|
||||
fields = {
|
||||
'created_at': obj_fields.DateTimeField(nullable=False),
|
||||
'created_by': obj_fields.StringField(nullable=False),
|
||||
'updated_at': obj_fields.DateTimeField(nullable=True),
|
||||
'updated_by': obj_fields.StringField(nullable=True),
|
||||
}
|
||||
|
||||
class DrydockObjectListBase(base.ObjectListBase):
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(DrydockObjectListBase, self).__init__(**kwargs)
|
||||
|
||||
def append(self, obj):
|
||||
self.objects.append(obj)
|
||||
|
||||
def replace_by_id(self, obj):
|
||||
        i = 0
        while i < len(self.objects):
            if self.objects[i].get_id() == obj.get_id():
                self.objects[i] = obj
|
||||
return True
|
||||
i = i + 1
|
||||
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def from_basic_list(cls, obj_list):
|
||||
model_list = cls()
|
||||
|
||||
for o in obj_list:
|
||||
model_list.append(o)
|
||||
|
||||
return model_list
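Because registration_hook publishes each registered class onto the helm_drydock.objects package, callers can reach every model through that single namespace once register_all() has run. A minimal, hypothetical usage sketch (the field values are illustrative):

    import helm_drydock.objects as objects

    objects.register_all()

    # HostProfile was attached to the package by the registration hook
    profile = objects.HostProfile(name='compute-r1', site='mysite')
    print(profile.obj_name(), profile.VERSION)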
|
@ -11,10 +11,15 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from enum import Enum, unique
|
||||
|
||||
@unique
|
||||
class OrchestratorAction(Enum):
|
||||
from oslo_versionedobjects import fields
|
||||
|
||||
class BaseDrydockEnum(fields.Enum):
|
||||
def __init__(self):
|
||||
super(BaseDrydockEnum, self).__init__(valid_values=self.__class__.ALL)
|
||||
|
||||
class OrchestratorAction(BaseDrydockEnum):
|
||||
# Orchestrator actions
|
||||
Noop = 'noop'
|
||||
ValidateDesign = 'validate_design'
|
||||
VerifySite = 'verify_site'
|
||||
@ -24,8 +29,7 @@ class OrchestratorAction(Enum):
|
||||
DeployNode = 'deploy_node'
|
||||
DestroyNode = 'destroy_node'
|
||||
|
||||
@unique
|
||||
class OobAction(Enum):
|
||||
# OOB driver actions
|
||||
ConfigNodePxe = 'config_node_pxe'
|
||||
SetNodeBoot = 'set_node_boot'
|
||||
PowerOffNode = 'power_off_node'
|
||||
@ -33,16 +37,53 @@ class OobAction(Enum):
|
||||
PowerCycleNode = 'power_cycle_node'
|
||||
InterrogateNode = 'interrogate_node'
|
||||
|
||||
@unique
|
||||
class ActionResult(Enum):
|
||||
ALL = (Noop, ValidateDesign, VerifySite, PrepareSite, VerifyNode,
|
||||
PrepareNode, DeployNode, DestroyNode, ConfigNodePxe,
|
||||
SetNodeBoot, PowerOffNode, PowerOnNode, PowerCycleNode,
|
||||
InterrogateNode)
|
||||
|
||||
class OrchestratorActionField(fields.BaseEnumField):
|
||||
AUTO_TYPE = OrchestratorAction()
|
||||
|
||||
class ActionResult(BaseDrydockEnum):
|
||||
Incomplete = 'incomplete'
|
||||
Success = 'success'
|
||||
PartialSuccess = 'partial_success'
|
||||
Failure = 'failure'
|
||||
DependentFailure = 'dependent_failure'
|
||||
|
||||
@unique
|
||||
class SiteStatus(Enum):
|
||||
ALL = (Incomplete, Success, PartialSuccess, Failure)
|
||||
|
||||
class ActionResultField(fields.BaseEnumField):
|
||||
AUTO_TYPE = ActionResult()
|
||||
|
||||
class TaskStatus(BaseDrydockEnum):
|
||||
Created = 'created'
|
||||
Waiting = 'waiting'
|
||||
Running = 'running'
|
||||
Stopping = 'stopping'
|
||||
Terminated = 'terminated'
|
||||
Errored = 'errored'
|
||||
Complete = 'complete'
|
||||
Stopped = 'stopped'
|
||||
|
||||
ALL = (Created, Waiting, Running, Stopping, Terminated,
|
||||
Errored, Complete, Stopped)
|
||||
|
||||
class TaskStatusField(fields.BaseEnumField):
|
||||
AUTO_TYPE = TaskStatus()
|
||||
|
||||
class ModelSource(BaseDrydockEnum):
|
||||
Designed = 'designed'
|
||||
Compiled = 'compiled'
|
||||
Build = 'build'
|
||||
|
||||
ALL = (Designed, Compiled, Build)
|
||||
|
||||
class ModelSourceField(fields.BaseEnumField):
|
||||
AUTO_TYPE = ModelSource()
|
||||
|
||||
class SiteStatus(BaseDrydockEnum):
|
||||
Unknown = 'unknown'
|
||||
DesignStarted = 'design_started'
|
||||
DesignAvailable = 'design_available'
|
||||
@ -51,10 +92,15 @@ class SiteStatus(Enum):
|
||||
Deployed = 'deployed'
|
||||
DesignUpdated = 'design_updated'
|
||||
|
||||
@unique
|
||||
class NodeStatus(Enum):
|
||||
ALL = (Unknown, Deploying, Deployed)
|
||||
|
||||
class SiteStatusField(fields.BaseEnumField):
|
||||
AUTO_TYPE = SiteStatus()
|
||||
|
||||
class NodeStatus(BaseDrydockEnum):
|
||||
Unknown = 'unknown'
|
||||
Designed = 'designed'
|
||||
Compiled = 'compiled' # Node attributes represent effective config after inheritance/merge
|
||||
Present = 'present' # IPMI access verified
|
||||
BasicVerifying = 'basic_verifying' # Base node verification in process
|
||||
FailedBasicVerify = 'failed_basic_verify' # Base node verification failed
|
||||
@ -73,13 +119,31 @@ class NodeStatus(Enum):
|
||||
Bootstrapped = 'bootstrapped' # Node fully bootstrapped
|
||||
Complete = 'complete' # Node is complete
|
||||
|
||||
@unique
|
||||
class TaskStatus(Enum):
|
||||
Created = 'created'
|
||||
Waiting = 'waiting'
|
||||
Running = 'running'
|
||||
Stopping = 'stopping'
|
||||
Terminated = 'terminated'
|
||||
Errored = 'errored'
|
||||
Complete = 'complete'
|
||||
Stopped = 'stopped'
|
||||
ALL = (Unknown, Designed, Compiled, Present, BasicVerifying, FailedBasicVerify,
|
||||
BasicVerified, Preparing, FailedPrepare, Prepared, FullyVerifying,
|
||||
FailedFullVerify, FullyVerified, Deploying, FailedDeploy, Deployed,
|
||||
Bootstrapping, FailedBootstrap, Bootstrapped, Complete)
|
||||
|
||||
|
||||
class NodeStatusField(fields.BaseEnumField):
|
||||
AUTO_TYPE = NodeStatus()
|
||||
|
||||
class NetworkLinkBondingMode(BaseDrydockEnum):
|
||||
Disabled = 'disabled'
|
||||
LACP = '802.3ad'
|
||||
RoundRobin = 'balanced-rr'
|
||||
Standby = 'active-backup'
|
||||
|
||||
ALL = (Disabled, LACP, RoundRobin, Standby)
|
||||
|
||||
class NetworkLinkBondingModeField(fields.BaseEnumField):
|
||||
AUTO_TYPE = NetworkLinkBondingMode()
|
||||
|
||||
class NetworkLinkTrunkingMode(BaseDrydockEnum):
|
||||
Disabled = 'disabled'
|
||||
Tagged = '802.1q'
|
||||
|
||||
ALL = (Disabled, Tagged)
|
||||
|
||||
class NetworkLinkTrunkingModeField(fields.BaseEnumField):
|
||||
AUTO_TYPE = NetworkLinkTrunkingMode()
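Each enum above is paired with a BaseEnumField subclass so it can be declared directly in an object's fields dict, with oslo.versionedobjects validating assignments against the ALL tuple. A small, hypothetical sketch (the ExampleTask class exists only for illustration):

    from oslo_versionedobjects import base as ovo_base

    import helm_drydock.objects.fields as hd_fields

    @ovo_base.VersionedObjectRegistry.register
    class ExampleTask(ovo_base.VersionedObject):
        VERSION = '1.0'
        fields = {
            'status': hd_fields.TaskStatusField(),
        }

    t = ExampleTask(status=hd_fields.TaskStatus.Created)
    t.status = hd_fields.TaskStatus.Running   # valid value from TaskStatus.ALL
    # t.status = 'bogus'                      # would raise ValueError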
|
helm_drydock/objects/hostprofile.py (new file, 381 lines)
@ -0,0 +1,381 @@
|
||||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from copy import deepcopy
|
||||
|
||||
import oslo_versionedobjects.fields as obj_fields
|
||||
|
||||
import helm_drydock.objects as objects
|
||||
import helm_drydock.objects.base as base
|
||||
import helm_drydock.objects.fields as hd_fields
|
||||
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class HostProfile(base.DrydockPersistentObject, base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'name': obj_fields.StringField(nullable=False),
|
||||
'site': obj_fields.StringField(nullable=False),
|
||||
'source': hd_fields.ModelSourceField(nullable=False),
|
||||
'parent_profile': obj_fields.StringField(nullable=True),
|
||||
'hardware_profile': obj_fields.StringField(nullable=True),
|
||||
'oob_type': obj_fields.StringField(nullable=True),
|
||||
'oob_network': obj_fields.StringField(nullable=True),
|
||||
'oob_account': obj_fields.StringField(nullable=True),
|
||||
'oob_credential': obj_fields.StringField(nullable=True),
|
||||
'storage_layout': obj_fields.StringField(nullable=True),
|
||||
'bootdisk_device': obj_fields.StringField(nullable=True),
|
||||
# Consider a custom field for storage size
|
||||
'bootdisk_root_size': obj_fields.StringField(nullable=True),
|
||||
'bootdisk_boot_size': obj_fields.StringField(nullable=True),
|
||||
'partitions': obj_fields.ObjectField('HostPartitionList',
|
||||
nullable=True),
|
||||
'interfaces': obj_fields.ObjectField('HostInterfaceList',
|
||||
nullable=True),
|
||||
'tags': obj_fields.ListOfStringsField(nullable=True),
|
||||
'owner_data': obj_fields.DictOfStringsField(nullable=True),
|
||||
'rack': obj_fields.StringField(nullable=True),
|
||||
}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(HostProfile, self).__init__(**kwargs)
|
||||
|
||||
|
||||
def get_rack(self):
|
||||
return self.rack
|
||||
|
||||
# HostProfile is keyed by name
|
||||
def get_id(self):
|
||||
return self.get_name()
|
||||
|
||||
def get_name(self):
|
||||
return self.name
|
||||
|
||||
def has_tag(self, tag):
|
||||
if tag in self.tags:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def apply_inheritance(self, site_design):
|
||||
# No parent to inherit from, just apply design values
|
||||
# and return
|
||||
if self.parent_profile is None:
|
||||
self.source = hd_fields.ModelSource.Compiled
|
||||
return
|
||||
|
||||
parent = site_design.get_host_profile(self.parent_profile)
|
||||
|
||||
if parent is None:
|
||||
raise NameError("Cannot find parent profile %s for %s"
|
||||
                % (self.parent_profile, self.name))
|
||||
|
||||
parent.apply_inheritance(site_design)
|
||||
|
||||
# First compute inheritance for simple fields
|
||||
inheritable_field_list = [
|
||||
"hardware_profile", "oob_type", "oob_network",
|
||||
"oob_credential", "oob_account", "storage_layout",
|
||||
"bootdisk_device", "bootdisk_root_size", "bootdisk_boot_size",
|
||||
"rack"]
|
||||
|
||||
# Create applied data from self design values and parent
|
||||
# applied values
|
||||
|
||||
for f in inheritable_field_list:
|
||||
setattr(self, f, objects.Utils.apply_field_inheritance(
|
||||
getattr(self, f, None),
|
||||
getattr(parent, f, None)))
|
||||
|
||||
# Now compute inheritance for complex types
|
||||
self.tags = objects.Utils.merge_lists(self.tags, parent.tags)
|
||||
|
||||
self.owner_data = objects.Utils.merge_dicts(self.owner_data, parent.owner_data)
|
||||
|
||||
self.interfaces = HostInterfaceList.from_basic_list(
|
||||
HostInterface.merge_lists(self.interfaces, parent.interfaces))
|
||||
|
||||
self.partitions = HostPartitionList.from_basic_list(
|
||||
HostPartition.merge_lists(self.partitions, parent.partitions))
|
||||
|
||||
self.source = hd_fields.ModelSource.Compiled
|
||||
|
||||
return
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class HostProfileList(base.DrydockObjectListBase, base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'objects': obj_fields.ListOfObjectsField('HostProfile')
|
||||
}
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class HostInterface(base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'device_name': obj_fields.StringField(),
|
||||
'source': hd_fields.ModelSourceField(),
|
||||
'network_link': obj_fields.StringField(nullable=True),
|
||||
'hardware_slaves': obj_fields.ListOfStringsField(nullable=True),
|
||||
'slave_selectors': obj_fields.ObjectField('HardwareDeviceSelectorList',
|
||||
nullable=True),
|
||||
'networks': obj_fields.ListOfStringsField(nullable=True),
|
||||
}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(HostInterface, self).__init__(**kwargs)
|
||||
|
||||
# HostInterface is keyed by device_name
|
||||
def get_id(self):
|
||||
return self.get_name()
|
||||
|
||||
def get_name(self):
|
||||
return self.device_name
|
||||
|
||||
def get_hw_slaves(self):
|
||||
return self.hardware_slaves
|
||||
|
||||
def get_slave_selectors(self):
|
||||
return self.slave_selectors
|
||||
|
||||
# Return number of slaves for this interface
|
||||
def get_slave_count(self):
|
||||
return len(self.hardware_slaves)
|
||||
|
||||
    # The device attribute may be a hardware alias that translates to a
    # physical device address. If the device attribute does not match an
    # alias, we assume it directly identifies an OS device name. When the
|
||||
# apply_hardware_profile method is called on the parent Node of this
|
||||
# device, the selector will be decided and applied
|
||||
|
||||
def add_selector(self, slave_selector):
|
||||
if self.slave_selectors is None:
|
||||
self.slave_selectors = objects.HardwareDeviceSelectorList()
|
||||
|
||||
self.slave_selectors.append(slave_selector)
|
||||
|
||||
"""
|
||||
    Merge two lists of HostInterface models, with child_list taking
    priority in case of conflict. If a member of child_list has a device_name
|
||||
beginning with '!' it indicates that HostInterface should be
|
||||
removed from the merged list
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def merge_lists(child_list, parent_list):
|
||||
effective_list = []
|
||||
|
||||
if len(child_list) == 0 and len(parent_list) > 0:
|
||||
for p in parent_list:
|
||||
pp = deepcopy(p)
|
||||
                pp.source = hd_fields.ModelSource.Compiled
|
||||
effective_list.append(pp)
|
||||
elif len(parent_list) == 0 and len(child_list) > 0:
|
||||
for i in child_list:
|
||||
if i.get_name().startswith('!'):
|
||||
continue
|
||||
else:
|
||||
ii = deepcopy(i)
|
||||
                    ii.source = hd_fields.ModelSource.Compiled
|
||||
effective_list.append(ii)
|
||||
elif len(parent_list) > 0 and len(child_list) > 0:
|
||||
parent_interfaces = []
|
||||
for i in parent_list:
|
||||
parent_name = i.get_name()
|
||||
parent_interfaces.append(parent_name)
|
||||
add = True
|
||||
for j in child_list:
|
||||
if j.get_name() == ("!" + parent_name):
|
||||
add = False
|
||||
break
|
||||
elif j.get_name() == parent_name:
|
||||
m = objects.HostInterface()
|
||||
m.device_name = j.get_name()
|
||||
m.network_link = \
|
||||
objects.Utils.apply_field_inheritance(
|
||||
getattr(j, 'network_link', None),
|
||||
getattr(i, 'network_link', None))
|
||||
|
||||
s = [x for x
|
||||
in getattr(i, 'hardware_slaves', [])
|
||||
if ("!" + x) not in getattr(j, 'hardware_slaves', [])]
|
||||
|
||||
s.extend(
|
||||
[x for x
|
||||
in getattr(j, 'hardware_slaves', [])
|
||||
if not x.startswith("!")])
|
||||
|
||||
m.hardware_slaves = s
|
||||
|
||||
n = [x for x
|
||||
in getattr(i, 'networks',[])
|
||||
if ("!" + x) not in getattr(j, 'networks', [])]
|
||||
|
||||
n.extend(
|
||||
[x for x
|
||||
in getattr(j, 'networks', [])
|
||||
if not x.startswith("!")])
|
||||
|
||||
m.networks = n
|
||||
                        m.source = hd_fields.ModelSource.Compiled
|
||||
|
||||
effective_list.append(m)
|
||||
add = False
|
||||
break
|
||||
|
||||
if add:
|
||||
ii = deepcopy(i)
|
||||
                    ii.source = hd_fields.ModelSource.Compiled
|
||||
effective_list.append(ii)
|
||||
|
||||
for j in child_list:
|
||||
if (j.device_name not in parent_interfaces
|
||||
and not j.get_name().startswith("!")):
|
||||
jj = deepcopy(j)
|
||||
                    jj.source = hd_fields.ModelSource.Compiled
|
||||
effective_list.append(jj)
|
||||
|
||||
return effective_list
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class HostInterfaceList(base.DrydockObjectListBase, base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'objects': obj_fields.ListOfObjectsField('HostInterface')
|
||||
}
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class HostPartition(base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'name': obj_fields.StringField(),
|
||||
'source': hd_fields.ModelSourceField(),
|
||||
'device': obj_fields.StringField(nullable=True),
|
||||
'part_uuid': obj_fields.UUIDField(nullable=True),
|
||||
'size': obj_fields.StringField(nullable=True),
|
||||
'mountpoint': obj_fields.StringField(nullable=True),
|
||||
'fstype': obj_fields.StringField(nullable=True, default='ext4'),
|
||||
'mount_options': obj_fields.StringField(nullable=True, default='defaults'),
|
||||
'fs_uuid': obj_fields.UUIDField(nullable=True),
|
||||
'fs_label': obj_fields.StringField(nullable=True),
|
||||
'selector': obj_fields.ObjectField('HardwareDeviceSelector',
|
||||
nullable=True),
|
||||
}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(HostPartition, self).__init__(**kwargs)
|
||||
|
||||
def get_device(self):
|
||||
return self.device
|
||||
|
||||
# HostPartition keyed by name
|
||||
def get_id(self):
|
||||
return self.get_name()
|
||||
|
||||
def get_name(self):
|
||||
return self.name
|
||||
|
||||
    # The device attribute may be a hardware alias that translates to a
    # physical device address. If the device attribute does not match an
    # alias, we assume it directly identifies an OS device name. When the
|
||||
# apply_hardware_profile method is called on the parent Node of this
|
||||
# device, the selector will be decided and applied
|
||||
|
||||
def set_selector(self, selector):
|
||||
self.selector = selector
|
||||
|
||||
def get_selector(self):
|
||||
return self.selector
|
||||
|
||||
"""
|
||||
    Merge two lists of HostPartition models, with child_list taking
    priority in case of conflict. If a member of child_list has a name
|
||||
beginning with '!' it indicates that HostPartition should be
|
||||
removed from the merged list
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def merge_lists(child_list, parent_list):
|
||||
effective_list = []
|
||||
|
||||
if len(child_list) == 0 and len(parent_list) > 0:
|
||||
for p in parent_list:
|
||||
pp = deepcopy(p)
|
||||
pp.source = hd_fields.ModelSource.Compiled
|
||||
effective_list.append(pp)
|
||||
elif len(parent_list) == 0 and len(child_list) > 0:
|
||||
for i in child_list:
|
||||
if i.get_name().startswith('!'):
|
||||
continue
|
||||
else:
|
||||
ii = deepcopy(i)
|
||||
ii.source = hd_fields.ModelSource.Compiled
|
||||
effective_list.append(ii)
|
||||
elif len(parent_list) > 0 and len(child_list) > 0:
|
||||
inherit_field_list = ["device", "part_uuid", "size",
|
||||
"mountpoint", "fstype", "mount_options",
|
||||
"fs_uuid", "fs_label"]
|
||||
parent_partitions = []
|
||||
for i in parent_list:
|
||||
parent_name = i.get_name()
|
||||
parent_partitions.append(parent_name)
|
||||
add = True
|
||||
for j in child_list:
|
||||
if j.get_name() == ("!" + parent_name):
|
||||
add = False
|
||||
break
|
||||
elif j.get_name() == parent_name:
|
||||
p = objects.HostPartition()
|
||||
p.name = j.get_name()
|
||||
|
||||
for f in inherit_field_list:
|
||||
setattr(p, f,
|
||||
objects.Utils.apply_field_inheritance(getattr(j, f, None),
|
||||
getattr(i, f, None)))
|
||||
add = False
|
||||
p.source = hd_fields.ModelSource.Compiled
|
||||
effective_list.append(p)
|
||||
if add:
|
||||
ii = deepcopy(i)
|
||||
ii.source = hd_fields.ModelSource.Compiled
|
||||
effective_list.append(ii)
|
||||
|
||||
for j in child_list:
|
||||
                if (j.get_name() not in parent_partitions and
|
||||
not j.get_name().startswith("!")):
|
||||
jj = deepcopy(j)
|
||||
jj.source = hd_fields.ModelSource.Compiled
|
||||
effective_list.append(jj)
|
||||
|
||||
return effective_list
|
||||
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class HostPartitionList(base.DrydockObjectListBase, base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'objects': obj_fields.ListOfObjectsField('HostPartition')
|
||||
}
|
helm_drydock/objects/hwprofile.py (new file, 125 lines)
@ -0,0 +1,125 @@
|
||||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from copy import deepcopy
|
||||
|
||||
from oslo_versionedobjects import fields as ovo_fields
|
||||
|
||||
import helm_drydock.objects as objects
|
||||
import helm_drydock.objects.base as base
|
||||
import helm_drydock.objects.fields as hd_fields
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class HardwareProfile(base.DrydockPersistentObject, base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'name': ovo_fields.StringField(),
|
||||
'source': hd_fields.ModelSourceField(),
|
||||
'site': ovo_fields.StringField(),
|
||||
'vendor': ovo_fields.StringField(nullable=True),
|
||||
'generation': ovo_fields.StringField(nullable=True),
|
||||
'hw_version': ovo_fields.StringField(nullable=True),
|
||||
'bios_version': ovo_fields.StringField(nullable=True),
|
||||
'boot_mode': ovo_fields.StringField(nullable=True),
|
||||
'bootstrap_protocol': ovo_fields.StringField(nullable=True),
|
||||
'pxe_interface': ovo_fields.StringField(nullable=True),
|
||||
'devices': ovo_fields.ObjectField('HardwareDeviceAliasList',
|
||||
nullable=True),
|
||||
}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(HardwareProfile, self).__init__(**kwargs)
|
||||
|
||||
return
|
||||
|
||||
# HardwareProfile keyed on name
|
||||
def get_id(self):
|
||||
return self.get_name()
|
||||
|
||||
def get_name(self):
|
||||
return self.name
|
||||
|
||||
def resolve_alias(self, alias_type, alias):
|
||||
for d in self.devices:
|
||||
if d.alias == alias and d.bus_type == alias_type:
|
||||
selector = objects.HardwareDeviceSelector()
|
||||
selector.selector_type = "address"
|
||||
selector.address = d.address
|
||||
selector.device_type = d.dev_type
|
||||
return selector
|
||||
|
||||
return None
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class HardwareProfileList(base.DrydockObjectListBase, base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'objects': ovo_fields.ListOfObjectsField('HardwareProfile')
|
||||
}
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class HardwareDeviceAlias(base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'alias': ovo_fields.StringField(),
|
||||
'source': hd_fields.ModelSourceField(),
|
||||
'address': ovo_fields.StringField(),
|
||||
'bus_type': ovo_fields.StringField(),
|
||||
'dev_type': ovo_fields.StringField(nullable=True),
|
||||
}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(HardwareDeviceAlias, self).__init__(**kwargs)
|
||||
|
||||
# HardwareDeviceAlias keyed on alias
|
||||
def get_id(self):
|
||||
return self.alias
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class HardwareDeviceAliasList(base.DrydockObjectListBase, base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'objects': ovo_fields.ListOfObjectsField('HardwareDeviceAlias')
|
||||
}
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class HardwareDeviceSelector(base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'selector_type': ovo_fields.StringField(),
|
||||
'address': ovo_fields.StringField(),
|
||||
'device_type': ovo_fields.StringField()
|
||||
}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(HardwareDeviceSelector, self).__init__(**kwargs)
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class HardwareDeviceSelectorList(base.DrydockObjectListBase, base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'objects': ovo_fields.ListOfObjectsField('HardwareDeviceSelector')
|
||||
}
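resolve_alias() is what BaremetalNode.apply_hardware_profile relies on to turn a design-time device alias into a physical selector. A minimal, hypothetical sketch (the alias data is illustrative):

    import helm_drydock.objects as objects

    objects.register_all()

    alias = objects.HardwareDeviceAlias(alias='pnic01', bus_type='pci',
                                        address='0000:00:03.0',
                                        dev_type='intel_x710')
    hw = objects.HardwareProfile(name='dell_r720', site='mysite')
    hw.devices = objects.HardwareDeviceAliasList(objects=[alias])

    selector = hw.resolve_alias('pci', 'pnic01')
    # selector.selector_type == 'address'; selector.address == '0000:00:03.0'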
|
helm_drydock/objects/network.py (new file, 109 lines)
@ -0,0 +1,109 @@
|
||||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Models for helm_drydock
|
||||
#
|
||||
import logging
|
||||
|
||||
from copy import deepcopy
|
||||
|
||||
import oslo_versionedobjects.fields as ovo_fields
|
||||
|
||||
import helm_drydock.objects as objects
|
||||
import helm_drydock.objects.base as base
|
||||
import helm_drydock.objects.fields as hd_fields
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class NetworkLink(base.DrydockPersistentObject, base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'name': ovo_fields.StringField(),
|
||||
'site': ovo_fields.StringField(),
|
||||
'bonding_mode': hd_fields.NetworkLinkBondingModeField(
|
||||
default=hd_fields.NetworkLinkBondingMode.Disabled),
|
||||
'bonding_xmit_hash': ovo_fields.StringField(nullable=True),
|
||||
'bonding_peer_rate': ovo_fields.StringField(nullable=True),
|
||||
'bonding_mon_rate': ovo_fields.IntegerField(nullable=True),
|
||||
'bonding_up_delay': ovo_fields.IntegerField(nullable=True),
|
||||
'bonding_down_delay': ovo_fields.IntegerField(nullable=True),
|
||||
'mtu': ovo_fields.IntegerField(default=1500),
|
||||
'linkspeed': ovo_fields.StringField(default='auto'),
|
||||
'trunk_mode': hd_fields.NetworkLinkTrunkingModeField(
|
||||
default=hd_fields.NetworkLinkTrunkingMode.Disabled),
|
||||
'native_network': ovo_fields.StringField(nullable=True),
|
||||
}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(NetworkLink, self).__init__(**kwargs)
|
||||
|
||||
# NetworkLink keyed by name
|
||||
def get_id(self):
|
||||
return self.get_name()
|
||||
|
||||
def get_name(self):
|
||||
return self.name
|
||||
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class NetworkLinkList(base.DrydockObjectListBase, base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'objects': ovo_fields.ListOfObjectsField('NetworkLink'),
|
||||
}
|
||||
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class Network(base.DrydockPersistentObject, base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'name': ovo_fields.StringField(),
|
||||
'site': ovo_fields.StringField(),
|
||||
'cidr': ovo_fields.StringField(),
|
||||
'allocation_strategy': ovo_fields.StringField(),
|
||||
'vlan_id': ovo_fields.StringField(nullable=True),
|
||||
'mtu': ovo_fields.IntegerField(nullable=True),
|
||||
'dns_domain': ovo_fields.StringField(nullable=True),
|
||||
'dns_servers': ovo_fields.StringField(nullable=True),
|
||||
'ranges': ovo_fields.ListOfDictOfNullableStringsField(),
|
||||
'routes': ovo_fields.ListOfDictOfNullableStringsField(),
|
||||
}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(Network, self).__init__(**kwargs)
|
||||
|
||||
# Network keyed on name
|
||||
def get_id(self):
|
||||
return self.get_name()
|
||||
|
||||
def get_name(self):
|
||||
return self.name
|
||||
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class NetworkList(base.DrydockObjectListBase, base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'objects': ovo_fields.ListOfObjectsField('Network'),
|
||||
}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(NetworkList, self).__init__(**kwargs)
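The ranges and routes fields hold plain lists of string dictionaries rather than nested objects. A small, hypothetical sketch of populating a Network (the addresses are illustrative):

    import helm_drydock.objects as objects

    objects.register_all()

    net = objects.Network(name='mgmt', site='mysite', cidr='172.16.1.0/24',
                          allocation_strategy='static',
                          ranges=[{'type': 'static',
                                   'start': '172.16.1.15',
                                   'end': '172.16.1.254'}],
                          routes=[{'subnet': '0.0.0.0/0',
                                   'gateway': '172.16.1.1',
                                   'metric': '100'}])
    print(net.get_id())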
|
helm_drydock/objects/node.py (new file, 134 lines)
@ -0,0 +1,134 @@
|
||||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Models for helm_drydock
|
||||
#
|
||||
import logging
|
||||
|
||||
from copy import deepcopy
|
||||
|
||||
from oslo_versionedobjects import fields as ovo_fields
|
||||
|
||||
import helm_drydock.objects as objects
|
||||
import helm_drydock.objects.hostprofile
|
||||
import helm_drydock.objects.base as base
|
||||
import helm_drydock.objects.fields as hd_fields
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class BaremetalNode(helm_drydock.objects.hostprofile.HostProfile):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'addressing': ovo_fields.ObjectField('IpAddressAssignmentList')
|
||||
}
|
||||
|
||||
# A BaremetalNode is really nothing more than a physical
|
||||
# instantiation of a HostProfile, so they both represent
|
||||
# the same set of CIs
|
||||
def __init__(self, **kwargs):
|
||||
super(BaremetalNode, self).__init__(**kwargs)
|
||||
|
||||
# Compile the applied version of this model sourcing referenced
|
||||
# data from the passed site design
|
||||
def compile_applied_model(self, site_design):
|
||||
self.apply_host_profile(site_design)
|
||||
self.apply_hardware_profile(site_design)
|
||||
self.source = hd_fields.ModelSource.Compiled
|
||||
return
|
||||
|
||||
def apply_host_profile(self, site_design):
|
||||
self.apply_inheritance(site_design)
|
||||
return
|
||||
|
||||
# Translate device alises to physical selectors and copy
|
||||
# other hardware attributes into this object
|
||||
def apply_hardware_profile(self, site_design):
|
||||
if self.hardware_profile is None:
|
||||
raise ValueError("Hardware profile not set")
|
||||
|
||||
hw_profile = site_design.get_hardware_profile(self.hardware_profile)
|
||||
|
||||
for i in getattr(self, 'interfaces', []):
|
||||
for s in i.get_hw_slaves():
|
||||
selector = hw_profile.resolve_alias("pci", s)
|
||||
if selector is None:
|
||||
selector = objects.HardwareDeviceSelector()
|
||||
selector.selector_type = 'name'
|
||||
selector.address = s
|
||||
|
||||
i.add_selector(selector)
|
||||
|
||||
for p in getattr(self, 'partitions', []):
|
||||
selector = hw_profile.resolve_alias("scsi", p.get_device())
|
||||
if selector is None:
|
||||
selector = objects.HardwareDeviceSelector()
|
||||
selector.selector_type = 'name'
|
||||
selector.address = p.get_device()
|
||||
p.set_selector(selector)
|
||||
|
||||
return
|
||||
|
||||
def get_applied_interface(self, iface_name):
|
||||
for i in getattr(self, 'interfaces', []):
|
||||
if i.get_name() == iface_name:
|
||||
return i
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def get_network_address(self, network_name):
|
||||
for a in getattr(self, 'addressing', []):
|
||||
if a.network == network_name:
|
||||
return a.address
|
||||
|
||||
return None
|
||||
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class BaremetalNodeList(base.DrydockObjectListBase, base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'objects': ovo_fields.ListOfObjectsField('BaremetalNode')
|
||||
}
|
||||
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class IpAddressAssignment(base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'type': ovo_fields.StringField(),
|
||||
'address': ovo_fields.StringField(nullable=True),
|
||||
'network': ovo_fields.StringField(),
|
||||
}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(IpAddressAssignment, self).__init__(**kwargs)
|
||||
|
||||
# IpAddressAssignment keyed by network
|
||||
def get_id(self):
|
||||
return self.network
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class IpAddressAssignmentList(base.DrydockObjectListBase, base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'objects': ovo_fields.ListOfObjectsField('IpAddressAssignment')
|
||||
}
|
@ -1,6 +1,7 @@
|
||||
# Drydock Model #
|
||||
|
||||
Models for the drydock design parts and subparts
|
||||
Object models for the drydock design parts and subparts. We use oslo.versionedobjects as the underlying
object management library, enabling RPC serialization and versioned persistence.
|
||||
|
||||
## Features ##
|
||||
|
||||
@ -25,13 +26,10 @@ based on the name.
|
||||
|
||||
### Phased Data ###
|
||||
|
||||
In other words, as a modeled object goes from design to apply
|
||||
to build the model keeps the data separated to retain reference
|
||||
values and provide context around particular attribute values.
|
||||
The *source* of the data in an object instance can be one of three
types; a short code sketch of this lifecycle follows the list below.
|
||||
|
||||
* Design - The data ingested from sources such as Formation
|
||||
* Apply - Computing inheritance of design data to render an effective site design
|
||||
* Build - Maintaining actions taken to implement the design and the results
|
||||
|
||||
Currently only applies to BaremetalNodes as no other design parts
|
||||
flow through the build process.
|
||||
* Designed - This is data directly ingested by Drydock representing a design part (Site, HostProfile, etc...) supplied by an external source
|
||||
* Compiled - This is designed data that has been processed through the Drydock
|
||||
inheritance / merge system. It is the effective design that will be implemented.
|
||||
* Build - This is the result of actual implementation. It should basically match the compiled view of the model, but might have some additional information only available after implementation.
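A minimal sketch of that lifecycle in code (class and field names are those introduced in this change; the commented calls are illustrative):

    import helm_drydock.objects as objects
    import helm_drydock.objects.fields as hd_fields

    objects.register_all()

    node = objects.BaremetalNode(name='node01', site='mysite',
                                 source=hd_fields.ModelSource.Designed)

    # Given a populated SiteDesign, compiling applies profile inheritance and
    # hardware aliases and moves the node's source from Designed to Compiled:
    # node.compile_applied_model(site_design)
    # assert node.source == hd_fields.ModelSource.Compiled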
|
helm_drydock/objects/site.py (new file, 244 lines)
@ -0,0 +1,244 @@
|
||||
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Models for helm_drydock
|
||||
#
|
||||
from copy import deepcopy
|
||||
import uuid
|
||||
|
||||
import oslo_versionedobjects.fields as ovo_fields
|
||||
|
||||
import helm_drydock.objects as objects
|
||||
import helm_drydock.objects.base as base
|
||||
import helm_drydock.objects.fields as hd_fields
# DesignError is raised below; assumed to be defined in helm_drydock.error
from helm_drydock.error import DesignError
|
||||
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class Site(base.DrydockPersistentObject, base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'name': ovo_fields.StringField(),
|
||||
'status': hd_fields.SiteStatusField(default=hd_fields.SiteStatus.Unknown),
|
||||
'source': hd_fields.ModelSourceField(),
|
||||
'tag_definitions': ovo_fields.ObjectField('NodeTagDefinitionList',
|
||||
nullable=True),
|
||||
}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(Site, self).__init__(**kwargs)
|
||||
|
||||
def get_id(self):
|
||||
return self.name
|
||||
|
||||
def get_name(self):
|
||||
return self.name
|
||||
|
||||
def add_tag_definition(self, tag_definition):
|
||||
self.tag_definitions.append(tag_definition)
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class NodeTagDefinition(base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'tag': ovo_fields.StringField(),
|
||||
'type': ovo_fields.StringField(),
|
||||
'definition': ovo_fields.StringField(),
|
||||
'source': hd_fields.ModelSourceField(),
|
||||
}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(NodeTagDefinition, self).__init__(**kwargs)
|
||||
|
||||
# TagDefinition keyed by tag
|
||||
def get_id(self):
|
||||
return self.tag
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class NodeTagDefinitionList(base.DrydockObjectListBase, base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'objects': ovo_fields.ListOfObjectsField('NodeTagDefinition'),
|
||||
}
|
||||
|
||||
@base.DrydockObjectRegistry.register
|
||||
class SiteDesign(base.DrydockPersistentObject, base.DrydockObject):
|
||||
|
||||
VERSION = '1.0'
|
||||
|
||||
fields = {
|
||||
'id': ovo_fields.UUIDField(),
|
||||
# if null, indicates this is the site base design
|
||||
'base_design_id': ovo_fields.UUIDField(nullable=True),
|
||||
'source': hd_fields.ModelSourceField(),
|
||||
'site': ovo_fields.ObjectField('Site', nullable=True),
|
||||
'networks': ovo_fields.ObjectField('NetworkList', nullable=True),
|
||||
'network_links': ovo_fields.ObjectField('NetworkLinkList', nullable=True),
|
||||
'host_profiles': ovo_fields.ObjectField('HostProfileList', nullable=True),
|
||||
'hardware_profiles': ovo_fields.ObjectField('HardwareProfileList', nullable=True),
|
||||
'baremetal_nodes': ovo_fields.ObjectField('BaremetalNodeList', nullable=True),
|
||||
}
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
super(SiteDesign, self).__init__(**kwargs)
|
||||
|
||||
# Initialize lists for blank instances
|
||||
def obj_load_attr(self, attrname):
|
||||
if attrname in self.fields.keys():
|
||||
setattr(self, attrname, None)
|
||||
else:
|
||||
raise ValueError("Unknown field %s" % (attrname))
|
||||
|
||||
# Assign UUID id
|
||||
def assign_id(self):
|
||||
self.id = uuid.uuid4()
|
||||
return self.id
|
||||
|
||||
# SiteDesign Keyed by id
|
||||
def get_id(self):
|
||||
return self.id
|
||||
|
||||
def get_site(self):
|
||||
return self.site
|
||||
|
||||
def set_site(self, site):
|
||||
self.site = site
|
||||
|
||||
def add_network(self, new_network):
|
||||
if new_network is None:
|
||||
raise DesignError("Invalid Network model")
|
||||
|
||||
if self.networks is None:
|
||||
self.networks = objects.NetworkList()
|
||||
|
||||
self.networks.append(new_network)
|
||||
|
||||
def get_network(self, network_key):
|
||||
for n in self.networks:
|
||||
if n.get_id() == network_key:
|
||||
return n
|
||||
|
||||
raise DesignError("Network %s not found in design state"
|
||||
% network_key)
|
||||
|
||||
def add_network_link(self, new_network_link):
|
||||
if new_network_link is None:
|
||||
raise DesignError("Invalid NetworkLink model")
|
||||
|
||||
if self.network_links is None:
|
||||
self.network_links = objects.NetworkLinkList()
|
||||
|
||||
self.network_links.append(new_network_link)
|
||||
|
||||
def get_network_link(self, link_key):
|
||||
for l in self.network_links:
|
||||
if l.get_id() == link_key:
|
||||
return l
|
||||
|
||||
raise DesignError("NetworkLink %s not found in design state"
|
||||
% link_key)
|
||||
|
||||
def add_host_profile(self, new_host_profile):
|
||||
if new_host_profile is None:
|
||||
raise DesignError("Invalid HostProfile model")
|
||||
|
||||
if self.host_profiles is None:
|
||||
self.host_profiles = objects.HostProfileList()
|
||||
|
||||
self.host_profiles.append(new_host_profile)
|
||||
|
||||
def get_host_profile(self, profile_key):
|
||||
for p in self.host_profiles:
|
||||
if p.get_id() == profile_key:
|
||||
return p
|
||||
|
||||
raise DesignError("HostProfile %s not found in design state"
|
||||
% profile_key)
|
||||
|
||||
def add_hardware_profile(self, new_hardware_profile):
|
||||
if new_hardware_profile is None:
|
||||
raise DesignError("Invalid HardwareProfile model")
|
||||
|
||||
if self.hardware_profiles is None:
|
||||
self.hardware_profiles = objects.HardwareProfileList()
|
||||
|
||||
self.hardware_profiles.append(new_hardware_profile)
|
||||
|
||||
def get_hardware_profile(self, profile_key):
|
||||
for p in self.hardware_profiles:
|
||||
if p.get_id() == profile_key:
|
||||
return p
|
||||
|
||||
raise DesignError("HardwareProfile %s not found in design state"
|
||||
% profile_key)
|
||||
|
||||
def add_baremetal_node(self, new_baremetal_node):
|
||||
if new_baremetal_node is None:
|
||||
raise DesignError("Invalid BaremetalNode model")
|
||||
|
||||
if self.baremetal_nodes is None:
|
||||
self.baremetal_nodes = objects.BaremetalNodeList()
|
||||
|
||||
self.baremetal_nodes.append(new_baremetal_node)
|
||||
|
||||
def get_baremetal_node(self, node_key):
|
||||
for n in self.baremetal_nodes:
|
||||
if n.get_id() == node_key:
|
||||
return n
|
||||
|
||||
raise DesignError("BaremetalNode %s not found in design state"
|
||||
% node_key)
|
||||
|
||||
"""
|
||||
Support filtering on rack name, node name or node tag
|
||||
for now. Each filter can be a comma-delimited list of
|
||||
values. The final result is an intersection of all the
|
||||
filters
|
||||
"""
|
||||
def get_filtered_nodes(self, node_filter):
|
||||
effective_nodes = self.baremetal_nodes
|
||||
|
||||
# filter by rack
|
||||
rack_filter = node_filter.get('rackname', None)
|
||||
|
||||
if rack_filter is not None:
|
||||
rack_list = rack_filter.split(',')
|
||||
effective_nodes = [x
|
||||
for x in effective_nodes
|
||||
if x.get_rack() in rack_list]
|
||||
# filter by name
|
||||
name_filter = node_filter.get('nodename', None)
|
||||
|
||||
if name_filter is not None:
|
||||
name_list = name_filter.split(',')
|
||||
effective_nodes = [x
|
||||
for x in effective_nodes
|
||||
if x.get_name() in name_list]
|
||||
# filter by tag
|
||||
tag_filter = node_filter.get('tags', None)
|
||||
|
||||
if tag_filter is not None:
|
||||
tag_list = tag_filter.split(',')
|
||||
effective_nodes = [x
|
||||
for x in effective_nodes
|
||||
for t in tag_list
|
||||
if x.has_tag(t)]
|
||||
|
||||
return effective_nodes
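A short, hypothetical example of the filter dictionary this method expects (the node data implied here is illustrative):

    # Intersection of the rack and tag filters; each value may be a
    # comma-delimited list of acceptable values.
    node_filter = {
        'rackname': 'rack01,rack02',
        'tags': 'compute',
    }
    # matching_nodes = site_design.get_filtered_nodes(node_filter)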
|
||||
|
@ -17,19 +17,19 @@ from threading import Lock
|
||||
|
||||
import helm_drydock.error as errors
|
||||
|
||||
import helm_drydock.enum as enum
|
||||
import helm_drydock.objects.fields as hd_fields
|
||||
|
||||
class Task(object):
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self.task_id = uuid.uuid4()
|
||||
self.status = enum.TaskStatus.Created
|
||||
self.status = hd_fields.TaskStatus.Created
|
||||
self.terminate = False
|
||||
self.subtasks = []
|
||||
self.lock_id = None
|
||||
self.result = enum.ActionResult.Incomplete
|
||||
self.result = hd_fields.ActionResult.Incomplete
|
||||
self.result_detail = None
|
||||
self.action = kwargs.get('action', enum.OrchestratorAction.Noop)
|
||||
self.action = kwargs.get('action', hd_fields.OrchestratorAction.Noop)
|
||||
|
||||
self.parent_task_id = kwargs.get('parent_task_id','')
|
||||
|
||||
@ -79,10 +79,10 @@ class OrchestratorTask(Task):
|
||||
|
||||
self.design_id = kwargs.get('design_id', 0)
|
||||
|
||||
if self.action in [enum.OrchestratorAction.VerifyNode,
|
||||
enum.OrchestratorAction.PrepareNode,
|
||||
enum.OrchestratorAction.DeployNode,
|
||||
enum.OrchestratorAction.DestroyNode]:
|
||||
if self.action in [hd_fields.OrchestratorAction.VerifyNode,
|
||||
hd_fields.OrchestratorAction.PrepareNode,
|
||||
hd_fields.OrchestratorAction.DeployNode,
|
||||
hd_fields.OrchestratorAction.DestroyNode]:
|
||||
self.node_filter = kwargs.get('node_filter', None)
|
||||
|
||||
|
@ -16,13 +16,12 @@ import time
|
||||
import threading
|
||||
import importlib
|
||||
|
||||
from enum import Enum, unique
|
||||
from copy import deepcopy
|
||||
|
||||
import helm_drydock.drivers as drivers
|
||||
import helm_drydock.model.task as tasks
|
||||
import helm_drydock.objects.task as tasks
|
||||
import helm_drydock.error as errors
|
||||
import helm_drydock.enum as enum
|
||||
import helm_drydock.objects.fields as hd_fields
|
||||
|
||||
class Orchestrator(object):
|
||||
|
||||
@ -86,13 +85,13 @@ class Orchestrator(object):
|
||||
|
||||
# Just for testing now, need to implement with enabled_drivers
|
||||
# logic
|
||||
if task.action == enum.OrchestratorAction.Noop:
|
||||
if task.action == hd_fields.OrchestratorAction.Noop:
|
||||
self.task_field_update(task_id,
|
||||
status=enum.TaskStatus.Running)
|
||||
status=hd_fields.TaskStatus.Running)
|
||||
|
||||
driver_task = self.create_task(tasks.DriverTask,
|
||||
design_id=0,
|
||||
action=enum.OrchestratorAction.Noop,
|
||||
action=hd_fields.OrchestratorAction.Noop,
|
||||
parent_task_id=task.get_id())
|
||||
|
||||
driver = drivers.ProviderDriver(state_manager=self.state_manager,
|
||||
@ -103,34 +102,33 @@ class Orchestrator(object):
|
||||
self.task_field_update(task_id, status=driver_task.get_status())
|
||||
|
||||
return
|
||||
elif task.action == enum.OrchestratorAction.ValidateDesign:
|
||||
elif task.action == hd_fields.OrchestratorAction.ValidateDesign:
|
||||
self.task_field_update(task_id,
|
||||
status=enum.TaskStatus.Running)
|
||||
status=hd_fields.TaskStatus.Running)
|
||||
try:
|
||||
site_design = self.get_effective_site(task_site,
|
||||
change_id=design_id)
|
||||
self.task_field_update(task_id,
|
||||
result=enum.ActionResult.Success)
|
||||
result=hd_fields.ActionResult.Success)
|
||||
except:
|
||||
self.task_field_update(task_id,
|
||||
result=enum.ActionResult.Failure)
|
||||
result=hd_fields.ActionResult.Failure)
|
||||
|
||||
self.task_field_update(task_id, status=enum.TaskStatus.Complete)
|
||||
self.task_field_update(task_id, status=hd_fields.TaskStatus.Complete)
|
||||
return
|
||||
elif task.action == enum.OrchestratorAction.VerifyNode:
|
||||
elif task.action == hd_fields.OrchestratorAction.VerifyNode:
|
||||
self.task_field_update(task_id,
|
||||
status=enum.TaskStatus.Running)
|
||||
status=hd_fields.TaskStatus.Running)
|
||||
|
||||
driver = self.enabled_drivers['oob']
|
||||
|
||||
if driver is None:
|
||||
self.task_field_update(task_id,
|
||||
status=enum.TaskStatus.Errored,
|
||||
result=enum.ActionResult.Failure)
|
||||
status=hd_fields.TaskStatus.Errored,
|
||||
result=hd_fields.ActionResult.Failure)
|
||||
return
|
||||
|
||||
site_design = self.get_effective_site(task_site,
|
||||
change_id=design_id)
|
||||
site_design = self.get_effective_site(design_id, task_site)
|
||||
|
||||
node_filter = task.node_filter
|
||||
|
||||
@ -144,7 +142,7 @@ class Orchestrator(object):
|
||||
driver_task = self.create_task(tasks.DriverTask,
|
||||
parent_task_id=task.get_id(),
|
||||
design_id=design_id,
|
||||
action=enum.OobAction.InterrogateNode,
|
||||
action=hd_fields.OrchestratorAction.InterrogateNode,
|
||||
task_scope=task_scope)
|
||||
|
||||
driver.execute_task(driver_task.get_id())
|
||||
@ -152,19 +150,19 @@ class Orchestrator(object):
|
||||
driver_task = self.state_manager.get_task(driver_task.get_id())
|
||||
|
||||
self.task_field_update(task_id,
|
||||
status=enum.TaskStatus.Complete,
|
||||
status=hd_fields.TaskStatus.Complete,
|
||||
result=driver_task.get_result())
|
||||
return
|
||||
elif task.action == enum.OrchestratorAction.PrepareNode:
|
||||
elif task.action == hd_fields.OrchestratorAction.PrepareNode:
|
||||
self.task_field_update(task_id,
|
||||
status=enum.TaskStatus.Running)
|
||||
status=hd_fields.TaskStatus.Running)
|
||||
|
||||
driver = self.enabled_drivers['oob']
|
||||
|
||||
if driver is None:
|
||||
self.task_field_update(task_id,
|
||||
status=enum.TaskStatus.Errored,
|
||||
result=enum.ActionResult.Failure)
|
||||
status=hd_fields.TaskStatus.Errored,
|
||||
result=hd_fields.ActionResult.Failure)
|
||||
return
|
||||
|
||||
site_design = self.get_effective_site(task_site,
|
||||
@ -182,7 +180,7 @@ class Orchestrator(object):
|
||||
setboot_task = self.create_task(tasks.DriverTask,
|
||||
parent_task_id=task.get_id(),
|
||||
design_id=design_id,
|
||||
action=enum.OobAction.SetNodeBoot,
|
||||
action=hd_fields.OrchestratorAction.SetNodeBoot,
|
||||
task_scope=task_scope)
|
||||
|
||||
driver.execute_task(setboot_task.get_id())
|
||||
@ -192,26 +190,26 @@ class Orchestrator(object):
|
||||
cycle_task = self.create_task(tasks.DriverTask,
|
||||
parent_task_id=task.get_id(),
|
||||
design_id=design_id,
|
||||
action=enum.OobAction.PowerCycleNode,
|
||||
action=hd_fields.OrchestratorAction.PowerCycleNode,
|
||||
task_scope=task_scope)
|
||||
driver.execute_task(cycle_task.get_id())
|
||||
|
||||
cycle_task = self.state_manager.get_task(cycle_task.get_id())
|
||||
|
||||
if (setboot_task.get_result() == enum.ActionResult.Success and
|
||||
cycle_task.get_result() == enum.ActionResult.Success):
|
||||
if (setboot_task.get_result() == hd_fields.ActionResult.Success and
|
||||
cycle_task.get_result() == hd_fields.ActionResult.Success):
|
||||
self.task_field_update(task_id,
|
||||
status=enum.TaskStatus.Complete,
|
||||
result=enum.ActionResult.Success)
|
||||
elif (setboot_task.get_result() == enum.ActionResult.Success or
|
||||
cycle_task.get_result() == enum.ActionResult.Success):
|
||||
status=hd_fields.TaskStatus.Complete,
|
||||
result=hd_fields.ActionResult.Success)
|
||||
elif (setboot_task.get_result() == hd_fields.ActionResult.Success or
|
||||
cycle_task.get_result() == hd_fields.ActionResult.Success):
|
||||
self.task_field_update(task_id,
|
||||
status=enum.TaskStatus.Complete,
|
||||
result=enum.ActionResult.PartialSuccess)
|
||||
status=hd_fields.TaskStatus.Complete,
|
||||
result=hd_fields.ActionResult.PartialSuccess)
|
||||
else:
|
||||
self.task_field_update(task_id,
|
||||
status=enum.TaskStatus.Complete,
|
||||
result=enum.ActionResult.Failure)
|
||||
status=hd_fields.TaskStatus.Complete,
|
||||
result=hd_fields.ActionResult.Failure)
|
||||
|
||||
return
|
||||
else:
|
||||
@ -279,74 +277,17 @@ class Orchestrator(object):
|
||||
else:
|
||||
return False
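
As a reading aid for the PrepareNode branch above: the orchestrator rolls the set-boot and power-cycle subtask results into a single outcome (Success only if both succeed, PartialSuccess if exactly one does, Failure otherwise). A minimal sketch of that combination rule using the hd_fields values this commit introduces; the helper name is hypothetical and not part of the commit.

import helm_drydock.objects.fields as hd_fields

def combine_oob_results(setboot_result, cycle_result):
    # Success only when both subtasks succeeded
    if (setboot_result == hd_fields.ActionResult.Success and
            cycle_result == hd_fields.ActionResult.Success):
        return hd_fields.ActionResult.Success
    # PartialSuccess when exactly one of them succeeded
    if (setboot_result == hd_fields.ActionResult.Success or
            cycle_result == hd_fields.ActionResult.Success):
        return hd_fields.ActionResult.PartialSuccess
    return hd_fields.ActionResult.Failure
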
|
||||
|
||||
"""
|
||||
load_design_data - Pull all the defined models in statemgmt and assemble
|
||||
them into a representation of the site. Does not compute inheritance.
|
||||
Throws an exception if multiple Site models are found.
|
||||
|
||||
param design_state - Instance of statemgmt.DesignState to load data from
|
||||
|
||||
return a Site model populated with all components from the design state
|
||||
"""
|
||||
|
||||
def load_design_data(self, site_name, change_id=None):
|
||||
design_data = None
|
||||
|
||||
if change_id is None or change_id == 0:
|
||||
try:
|
||||
design_data = self.state_manager.get_design_base()
|
||||
except DesignError as e:
|
||||
raise e
|
||||
else:
|
||||
design_data = self.state_manager.get_design_change(change_id)
|
||||
|
||||
site = design_data.get_site(site_name)
|
||||
|
||||
networks = design_data.get_networks()
|
||||
|
||||
for n in networks:
|
||||
if n.site == site_name:
|
||||
site.networks.append(n)
|
||||
|
||||
network_links = design_data.get_network_links()
|
||||
|
||||
for l in network_links:
|
||||
if l.site == site_name:
|
||||
site.network_links.append(l)
|
||||
|
||||
host_profiles = design_data.get_host_profiles()
|
||||
|
||||
for p in host_profiles:
|
||||
if p.site == site_name:
|
||||
site.host_profiles.append(p)
|
||||
|
||||
hardware_profiles = design_data.get_hardware_profiles()
|
||||
|
||||
for p in hardware_profiles:
|
||||
if p.site == site_name:
|
||||
site.hardware_profiles.append(p)
|
||||
|
||||
baremetal_nodes = design_data.get_baremetal_nodes()
|
||||
|
||||
for n in baremetal_nodes:
|
||||
if n.site == site_name:
|
||||
site.baremetal_nodes.append(n)
|
||||
|
||||
return site
|
||||
|
||||
def compute_model_inheritance(self, site_root):
|
||||
def compute_model_inheritance(self, site_design):
|
||||
|
||||
# For now the only thing that really incorporates inheritance is
|
||||
# host profiles and baremetal nodes. So we'll just resolve it for
|
||||
# the baremetal nodes which recursively resolves it for host profiles
|
||||
# assigned to those nodes
|
||||
|
||||
site_copy = deepcopy(site_root)
|
||||
|
||||
for n in site_copy.baremetal_nodes:
|
||||
n.compile_applied_model(site_copy)
|
||||
for n in site_design.baremetal_nodes:
|
||||
n.compile_applied_model(site_design)
|
||||
|
||||
return site_copy
|
||||
return
|
||||
"""
|
||||
compute_model_inheritance - given a fully populated Site model,
|
||||
compute the effective design by applying inheritance and references
|
||||
@ -354,22 +295,22 @@ class Orchestrator(object):
|
||||
return a Site model reflecting the effective design for the site
|
||||
"""
|
||||
|
||||
def get_described_site(self, site_name, change_id=None):
|
||||
def get_described_site(self, design_id, site_name):
|
||||
site_design = None
|
||||
|
||||
if site_name is None:
|
||||
raise errors.OrchestratorError("Cannot source design for site None")
|
||||
|
||||
site_design = self.load_design_data(site_name, change_id=change_id)
|
||||
site_design = self.state_manager.get_design(design_id)
|
||||
|
||||
return site_design
|
||||
|
||||
def get_effective_site(self, site_name, change_id=None):
|
||||
site_design = self.get_described_site(site_name, change_id=change_id)
|
||||
def get_effective_site(self, design_id, site_name):
|
||||
site_design = self.get_described_site(design_id, site_name)
|
||||
|
||||
site_model = self.compute_model_inheritance(site_design)
|
||||
self.compute_model_inheritance(site_design)
|
||||
|
||||
return site_model
|
||||
return site_design
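
The retrieval path is now keyed by a design ID rather than a site name plus change ID. A minimal usage sketch of the new flow, patterned on the tests later in this change; the YAML file name and site name are hypothetical.

import helm_drydock.objects as objects
import helm_drydock.orchestrator as orch
import helm_drydock.statemgmt as statemgmt
from helm_drydock.ingester import Ingester
import helm_drydock.ingester.plugins.yaml as yaml_ingester

objects.register_all()

state_mgr = statemgmt.DesignState()
design = objects.SiteDesign()
design_id = design.assign_id()
state_mgr.post_design(design)

# Populate the stored design from YAML (file name is a placeholder)
ingester = Ingester()
ingester.enable_plugins([yaml_ingester.YamlIngester])
ingester.ingest_data(plugin_name='yaml', design_state=state_mgr,
                     design_id=design_id, filenames=['site.yaml'])

orchestrator = orch.Orchestrator(state_manager=state_mgr)
# Loads the stored design, applies inheritance in place, returns it
site = orchestrator.get_effective_site(design_id, 'sitename')
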
|
||||
|
||||
def process_node_filter(self, node_filter, site_design):
|
||||
target_nodes = site_design.baremetal_nodes
|
||||
@ -398,5 +339,3 @@ class Orchestrator(object):
|
||||
if x.has_tag(t)]
|
||||
|
||||
return target_nodes
|
||||
|
||||
|
||||
|
@ -18,23 +18,16 @@ from threading import Lock
|
||||
|
||||
import uuid
|
||||
|
||||
import helm_drydock.model.node as node
|
||||
import helm_drydock.model.hostprofile as hostprofile
|
||||
import helm_drydock.model.network as network
|
||||
import helm_drydock.model.site as site
|
||||
import helm_drydock.model.hwprofile as hwprofile
|
||||
import helm_drydock.model.task as tasks
|
||||
import helm_drydock.objects as objects
|
||||
import helm_drydock.objects.task as tasks
|
||||
|
||||
from helm_drydock.error import DesignError, StateError
|
||||
|
||||
class DesignState(object):
|
||||
|
||||
def __init__(self):
|
||||
self.design_base = None
|
||||
self.design_base_lock = Lock()
|
||||
|
||||
self.design_changes = []
|
||||
self.design_changes_lock = Lock()
|
||||
self.designs = {}
|
||||
self.designs_lock = Lock()
|
||||
|
||||
self.builds = []
|
||||
self.builds_lock = Lock()
|
||||
@ -46,78 +39,46 @@ class DesignState(object):
|
||||
|
||||
# TODO Need to lock a design base or change once implementation
|
||||
# has started
|
||||
def get_design_base(self):
|
||||
if self.design_base is None:
|
||||
raise DesignError("No design base submitted")
|
||||
def get_design(self, design_id):
|
||||
if design_id not in self.designs.keys():
|
||||
raise DesignError("Design ID %s not found" % (design_id))
|
||||
|
||||
return deepcopy(self.design_base)
|
||||
return objects.SiteDesign.obj_from_primitive(self.designs[design_id])
|
||||
|
||||
def post_design_base(self, site_design):
|
||||
if site_design is not None and isinstance(site_design, SiteDesign):
|
||||
my_lock = self.design_base_lock.acquire(blocking=True,
|
||||
def post_design(self, site_design):
|
||||
if site_design is not None:
|
||||
my_lock = self.designs_lock.acquire(blocking=True,
|
||||
timeout=10)
|
||||
if my_lock:
|
||||
self.design_base = deepcopy(site_design)
|
||||
self.design_base_lock.release()
|
||||
design_id = site_design.id
|
||||
if design_id not in self.designs.keys():
|
||||
self.designs[design_id] = site_design.obj_to_primitive()
|
||||
else:
|
||||
self.designs_lock.release()
|
||||
raise StateError("Design ID %s already exists" % design_id)
|
||||
self.designs_lock.release()
|
||||
return True
|
||||
raise StateError("Could not acquire lock")
|
||||
else:
|
||||
raise DesignError("Design change must be a SiteDesign instance")
|
||||
|
||||
def put_design_base(self, site_design):
|
||||
if site_design is not None and isinstance(site_design, SiteDesign):
|
||||
my_lock = self.design_base_lock.acquire(blocking=True,
|
||||
def put_design(self, site_design):
|
||||
if site_design is not None:
|
||||
my_lock = self.designs_lock.acquire(blocking=True,
|
||||
timeout=10)
|
||||
if my_lock:
|
||||
self.design_base.merge_updates(site_design)
|
||||
self.design_base_lock.release()
|
||||
return True
|
||||
design_id = site_design.id
|
||||
if design_id not in self.designs.keys():
|
||||
self.designs_lock.release()
|
||||
raise StateError("Design ID %s does not exist" % design_id)
|
||||
else:
|
||||
self.designs[design_id] = site_design.obj_to_primitive()
|
||||
self.designs_lock.release()
|
||||
return True
|
||||
raise StateError("Could not acquire lock")
|
||||
else:
|
||||
raise DesignError("Design base must be a SiteDesign instance")
|
||||
|
||||
def get_design_change(self, changeid):
|
||||
match = [x for x in self.design_changes if x.changeid == changeid]
|
||||
|
||||
if len(match) == 0:
|
||||
raise DesignError("No design change %s found." % (changeid))
|
||||
else:
|
||||
return deepcopy(match[0])
|
||||
|
||||
def post_design_change(self, site_design):
|
||||
if site_design is not None and isinstance(site_design, SiteDesign):
|
||||
my_lock = self.design_changes_lock.acquire(blocking=True,
|
||||
timeout=10)
|
||||
if my_lock:
|
||||
exists = [(x) for x
|
||||
in self.design_changes
|
||||
if x.changeid == site_design.changeid]
|
||||
if len(exists) > 0:
|
||||
self.design_changes_lock.release()
|
||||
raise DesignError("Existing change %s found" %
|
||||
(site_design.changeid))
|
||||
|
||||
self.design_changes.append(deepcopy(site_design))
|
||||
self.design_changes_lock.release()
|
||||
return True
|
||||
raise StateError("Could not acquire lock")
|
||||
else:
|
||||
raise DesignError("Design change must be a SiteDesign instance")
|
||||
|
||||
def put_design_change(self, site_design):
|
||||
if site_design is not None and isinstance(site_design, SiteDesign):
|
||||
my_lock = self.design_changes_lock.acquire(blocking=True,
|
||||
timeout=10)
|
||||
if my_lock:
|
||||
changeid = site_design.changeid
|
||||
for c in self.design_changes:
|
||||
if c.changeid == changeid:
|
||||
c.merge_updates(site_design)
|
||||
return True
|
||||
raise StateError("Could not acquire lock")
|
||||
else:
|
||||
raise DesignError("Design change must be a SiteDesign instance")
|
||||
|
||||
def get_current_build(self):
|
||||
latest_stamp = 0
|
||||
current_build = None
|
||||
@ -246,273 +207,3 @@ class DesignState(object):
|
||||
raise StateError("Could not acquire lock")
|
||||
|
||||
|
||||
class SiteDesign(object):
|
||||
|
||||
def __init__(self, ischange=False, changeid=None):
|
||||
if ischange:
|
||||
if changeid is not None:
|
||||
self.changeid = changeid
|
||||
else:
|
||||
self.changeid = uuid.uuid4()
|
||||
else:
|
||||
# Base design
|
||||
self.changeid = 0
|
||||
|
||||
self.sites = []
|
||||
self.networks = []
|
||||
self.network_links = []
|
||||
self.host_profiles = []
|
||||
self.hardware_profiles = []
|
||||
self.baremetal_nodes = []
|
||||
|
||||
def add_site(self, new_site):
|
||||
if new_site is None or not isinstance(new_site, site.Site):
|
||||
raise DesignError("Invalid Site model")
|
||||
|
||||
self.sites.append(new_site)
|
||||
|
||||
def update_site(self, update):
|
||||
if update is None or not isinstance(update, site.Site):
|
||||
raise DesignError("Invalid Site model")
|
||||
|
||||
for i, s in enumerate(self.sites):
|
||||
if s.get_name() == update.get_name():
|
||||
self.sites[i] = deepcopy(update)
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def get_sites(self):
|
||||
return self.sites
|
||||
|
||||
def get_site(self, site_name):
|
||||
for s in self.sites:
|
||||
if s.name == site_name:
|
||||
return s
|
||||
|
||||
raise DesignError("Site %s not found in design state" % site_name)
|
||||
|
||||
def add_network(self, new_network):
|
||||
if new_network is None or not isinstance(new_network, network.Network):
|
||||
raise DesignError("Invalid Network model")
|
||||
|
||||
self.networks.append(new_network)
|
||||
|
||||
def update_network(self, update):
|
||||
if update is None or not isinstance(update, network.Network):
|
||||
raise DesignError("Invalid Network model")
|
||||
|
||||
for i, n in enumerate(self.networks):
|
||||
if n.get_name() == update.get_name():
|
||||
self.networks[i] = deepcopy(update)
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def get_networks(self):
|
||||
return self.networks
|
||||
|
||||
def get_network(self, network_name):
|
||||
for n in self.networks:
|
||||
if n.name == network_name:
|
||||
return n
|
||||
|
||||
raise DesignError("Network %s not found in design state"
|
||||
% network_name)
|
||||
|
||||
def add_network_link(self, new_network_link):
|
||||
if new_network_link is None or not isinstance(new_network_link,
|
||||
network.NetworkLink):
|
||||
raise DesignError("Invalid NetworkLink model")
|
||||
|
||||
self.network_links.append(new_network_link)
|
||||
|
||||
def update_network_link(self, update):
|
||||
if update is None or not isinstance(update, network.NetworkLink):
|
||||
raise DesignError("Invalid NetworkLink model")
|
||||
|
||||
for i, n in enumerate(self.network_links):
|
||||
if n.get_name() == update.get_name():
|
||||
self.network_links[i] = deepcopy(update)
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def get_network_links(self):
|
||||
return self.network_links
|
||||
|
||||
def get_network_link(self, link_name):
|
||||
for l in self.network_links:
|
||||
if l.name == link_name:
|
||||
return l
|
||||
|
||||
raise DesignError("NetworkLink %s not found in design state"
|
||||
% link_name)
|
||||
|
||||
def add_host_profile(self, new_host_profile):
|
||||
if new_host_profile is None or not isinstance(new_host_profile,
|
||||
hostprofile.HostProfile):
|
||||
raise DesignError("Invalid HostProfile model")
|
||||
|
||||
self.host_profiles.append(new_host_profile)
|
||||
|
||||
def update_host_profile(self, update):
|
||||
if update is None or not isinstance(update, hostprofile.HostProfile):
|
||||
raise DesignError("Invalid HostProfile model")
|
||||
|
||||
for i, h in enumerate(self.host_profiles):
|
||||
if h.get_name() == update.get_name():
|
||||
self.host_profiles[i] = deepcopy(update)
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def get_host_profiles(self):
|
||||
return self.host_profiles
|
||||
|
||||
def get_host_profile(self, profile_name):
|
||||
for p in self.host_profiles:
|
||||
if p.name == profile_name:
|
||||
return p
|
||||
|
||||
raise DesignError("HostProfile %s not found in design state"
|
||||
% profile_name)
|
||||
|
||||
def add_hardware_profile(self, new_hardware_profile):
|
||||
if (new_hardware_profile is None or
|
||||
not isinstance(new_hardware_profile, hwprofile.HardwareProfile)):
|
||||
raise DesignError("Invalid HardwareProfile model")
|
||||
|
||||
self.hardware_profiles.append(new_hardware_profile)
|
||||
|
||||
def update_hardware_profile(self, update):
|
||||
if update is None or not isinstance(update, hwprofile.HardwareProfile):
|
||||
raise DesignError("Invalid HardwareProfile model")
|
||||
|
||||
for i, h in enumerate(self.hardware_profiles):
|
||||
if h.get_name() == update.get_name():
|
||||
self.hardware_profiles[i] = deepcopy(update)
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def get_hardware_profiles(self):
|
||||
return self.hardware_profiles
|
||||
|
||||
def get_hardware_profile(self, profile_name):
|
||||
for p in self.hardware_profiles:
|
||||
if p.name == profile_name:
|
||||
return p
|
||||
|
||||
raise DesignError("HardwareProfile %s not found in design state"
|
||||
% profile_name)
|
||||
|
||||
def add_baremetal_node(self, new_baremetal_node):
|
||||
if (new_baremetal_node is None or
|
||||
not isinstance(new_baremetal_node, node.BaremetalNode)):
|
||||
raise DesignError("Invalid BaremetalNode model")
|
||||
|
||||
self.baremetal_nodes.append(new_baremetal_node)
|
||||
|
||||
def update_baremetal_node(self, update):
|
||||
if (update is None or not isinstance(update, node.BaremetalNode)):
|
||||
raise DesignError("Invalid BaremetalNode model")
|
||||
|
||||
for i, b in enumerate(self.baremetal_nodes):
|
||||
if b.get_name() == update.get_name():
|
||||
self.baremetal_nodes[i] = deepcopy(update)
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def get_baremetal_nodes(self):
|
||||
return self.baremetal_nodes
|
||||
|
||||
def get_baremetal_node(self, node_name):
|
||||
for n in self.baremetal_nodes:
|
||||
if n.name == node_name:
|
||||
return n
|
||||
|
||||
raise DesignError("BaremetalNode %s not found in design state"
|
||||
% node_name)
|
||||
|
||||
# Only merge the design parts included in the updated site
|
||||
# design. Changes are merged at the part level, not for fields
|
||||
# within a design part
|
||||
#
|
||||
# TODO convert update_* methods to use exceptions and convert to try block
|
||||
def merge_updates(self, updates):
|
||||
if updates is not None and isinstance(updates, SiteDesign):
|
||||
if updates.changeid == self.changeid:
|
||||
for u in updates.sites:
|
||||
if not self.update_site(u):
|
||||
self.add_site(u)
|
||||
for u in updates.networks:
|
||||
if not self.update_network(u):
|
||||
self.add_network(u)
|
||||
for u in updates.network_links:
|
||||
if not self.update_network_link(u):
|
||||
self.add_network_link(u)
|
||||
for u in updates.host_profiles:
|
||||
if not self.update_host_profile(u):
|
||||
self.add_host_profile(u)
|
||||
for u in updates.hardware_profiles:
|
||||
if not self.update_hardware_profile(u):
|
||||
self.add_hardware_profile(u)
|
||||
for u in updates.baremetal_nodes:
|
||||
if not self.update_baremetal_node(u):
|
||||
self.add_baremetal_node(u)
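
The comment above describes the legacy (pre-OVO) merge semantics this commit removes: whole design parts are replaced or appended, never merged field-by-field. A short sketch of that behaviour, patterned on the deleted test_sitedesign_merge test; the network names and CIDRs are placeholders.

# Pre-OVO behaviour removed by this commit: parts merge wholesale.
import helm_drydock.model.network as network
from helm_drydock.statemgmt import SiteDesign

base = SiteDesign()
base.add_network(network.Network(**{'apiVersion': 'v1.0',
                                    'metadata': {'name': 'net_a',
                                                 'region': 'testsite'},
                                    'spec': {'cidr': '172.16.0.0/24'}}))

update = SiteDesign()
update.add_network(network.Network(**{'apiVersion': 'v1.0',
                                      'metadata': {'name': 'net_b',
                                                   'region': 'testsite'},
                                      'spec': {'cidr': '172.16.1.0/24'}}))

base.merge_updates(update)            # net_b is appended; net_a is untouched
assert len(base.get_networks()) == 2
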
|
||||
|
||||
|
||||
class SiteBuild(SiteDesign):
|
||||
|
||||
def __init__(self, build_id=None):
|
||||
super(SiteBuild, self).__init__()
|
||||
|
||||
if build_id is None:
|
||||
self.buildid = datetime.datetime.now(timezone.utc).timestamp()
|
||||
else:
|
||||
self.buildid = build_id
|
||||
|
||||
def get_filtered_nodes(self, node_filter):
|
||||
effective_nodes = self.get_baremetal_nodes()
|
||||
|
||||
# filter by rack
|
||||
rack_filter = node_filter.get('rackname', None)
|
||||
|
||||
if rack_filter is not None:
|
||||
rack_list = rack_filter.split(',')
|
||||
effective_nodes = [x
|
||||
for x in effective_nodes
|
||||
if x.get_rack() in rack_list]
|
||||
# filter by name
|
||||
name_filter = node_filter.get('nodename', None)
|
||||
|
||||
if name_filter is not None:
|
||||
name_list = name_filter.split(',')
|
||||
effective_nodes = [x
|
||||
for x in effective_nodes
|
||||
if x.get_name() in name_list]
|
||||
# filter by tag
|
||||
tag_filter = node_filter.get('tags', None)
|
||||
|
||||
if tag_filter is not None:
|
||||
tag_list = tag_filter.split(',')
|
||||
effective_nodes = [x
|
||||
for x in effective_nodes
|
||||
for t in tag_list
|
||||
if x.has_tag(t)]
|
||||
|
||||
return effective_nodes
|
||||
"""
|
||||
Support filtering on rack name, node name or node tag
|
||||
for now. Each filter can be a comma-delimited list of
|
||||
values. The final result is an intersection of all the
|
||||
filters
|
||||
"""
|
||||
|
||||
def set_nodes_status(self, node_filter, status):
|
||||
target_nodes = self.get_filtered_nodes(node_filter)
|
||||
|
||||
for n in target_nodes:
|
||||
n.set_status(status)
|
||||
|
setup.py
@ -40,7 +40,7 @@ setup(name='helm_drydock',
|
||||
author_email='sh8121@att.com',
|
||||
license='Apache 2.0',
|
||||
packages=['helm_drydock',
|
||||
'helm_drydock.model',
|
||||
'helm_drydock.objects',
|
||||
'helm_drydock.ingester',
|
||||
'helm_drydock.ingester.plugins',
|
||||
'helm_drydock.statemgmt',
|
||||
@ -55,8 +55,9 @@ setup(name='helm_drydock',
|
||||
'requests-oauthlib',
|
||||
'pyghmi>=1.0.18',
|
||||
'netaddr',
|
||||
'pecan',
|
||||
'webob'
|
||||
'falcon',
|
||||
'webob',
|
||||
'oslo.versionedobjects>=1.23.0',
|
||||
],
|
||||
dependency_link=[
|
||||
'git+https://github.com/maas/python-libmaas.git'
|
||||
|
@ -1,4 +1,5 @@
|
||||
pytest-mock
|
||||
pytest
|
||||
mock
|
||||
tox
|
||||
tox
|
||||
oslo.versionedobjects[fixtures]>=1.23.0
|
@ -68,7 +68,6 @@ class TestClass(object):
|
||||
|
||||
return design_state
|
||||
|
||||
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def input_files(self, tmpdir_factory, request):
|
||||
|
@ -13,7 +13,8 @@
|
||||
# limitations under the License.
|
||||
|
||||
from helm_drydock.ingester import Ingester
|
||||
from helm_drydock.statemgmt import DesignState, SiteDesign
|
||||
from helm_drydock.statemgmt import DesignState
|
||||
import helm_drydock.objects as objects
|
||||
|
||||
import pytest
|
||||
import shutil
|
||||
@ -26,36 +27,43 @@ class TestClass(object):
|
||||
print("Running test {0}".format(method.__name__))
|
||||
|
||||
def test_ingest_full_site(self, input_files):
|
||||
objects.register_all()
|
||||
|
||||
input_file = input_files.join("fullsite.yaml")
|
||||
|
||||
design_state = DesignState()
|
||||
design_data = SiteDesign()
|
||||
design_state.post_design_base(design_data)
|
||||
design_data = objects.SiteDesign()
|
||||
design_id = design_data.assign_id()
|
||||
design_state.post_design(design_data)
|
||||
|
||||
ingester = Ingester()
|
||||
ingester.enable_plugins([helm_drydock.ingester.plugins.yaml.YamlIngester])
|
||||
ingester.ingest_data(plugin_name='yaml', design_state=design_state, filenames=[str(input_file)])
|
||||
ingester.ingest_data(plugin_name='yaml', design_state=design_state,
|
||||
filenames=[str(input_file)], design_id=design_id)
|
||||
|
||||
design_data = design_state.get_design_base()
|
||||
design_data = design_state.get_design(design_id)
|
||||
|
||||
assert len(design_data.get_host_profiles()) == 3
|
||||
assert len(design_data.get_baremetal_nodes()) == 2
|
||||
assert len(design_data.host_profiles) == 3
|
||||
assert len(design_data.baremetal_nodes) == 2
|
||||
|
||||
def test_ingest_federated_design(self, input_files):
|
||||
objects.register_all()
|
||||
|
||||
profiles_file = input_files.join("fullsite_profiles.yaml")
|
||||
networks_file = input_files.join("fullsite_networks.yaml")
|
||||
nodes_file = input_files.join("fullsite_nodes.yaml")
|
||||
|
||||
design_state = DesignState()
|
||||
design_data = SiteDesign()
|
||||
design_state.post_design_base(design_data)
|
||||
design_data = objects.SiteDesign()
|
||||
design_id = design_data.assign_id()
|
||||
design_state.post_design(design_data)
|
||||
|
||||
ingester = Ingester()
|
||||
ingester.enable_plugins([helm_drydock.ingester.plugins.yaml.YamlIngester])
|
||||
ingester.ingest_data(plugin_name='yaml', design_state=design_state,
|
||||
filenames=[str(profiles_file), str(networks_file), str(nodes_file)])
|
||||
ingester.ingest_data(plugin_name='yaml', design_state=design_state, design_id=design_id,
|
||||
filenames=[str(profiles_file), str(networks_file), str(nodes_file)])
|
||||
|
||||
design_data = design_state.get_design_base()
|
||||
design_data = design_state.get_design(design_id)
|
||||
|
||||
assert len(design_data.host_profiles) == 3
|
||||
|
||||
|
@ -11,11 +11,12 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from helm_drydock.ingester.plugins.yaml import YamlIngester
|
||||
import pytest
|
||||
import shutil
|
||||
import os
|
||||
import uuid
|
||||
|
||||
from helm_drydock.ingester.plugins.yaml import YamlIngester
|
||||
|
||||
class TestClass(object):
|
||||
|
||||
|
@ -13,57 +13,73 @@
|
||||
# limitations under the License.
|
||||
|
||||
import pytest
|
||||
import yaml
|
||||
from helm_drydock.model.hwprofile import HardwareProfile
|
||||
|
||||
import helm_drydock.objects as objects
|
||||
from helm_drydock.objects import fields
|
||||
|
||||
class TestClass(object):
|
||||
|
||||
def setup_method(self, method):
|
||||
print("Running test {0}".format(method.__name__))
|
||||
|
||||
def test_hardwareprofile(self):
|
||||
yaml_snippet = ("---\n"
|
||||
"apiVersion: 'v1.0'\n"
|
||||
"kind: HardwareProfile\n"
|
||||
"metadata:\n"
|
||||
" name: HPGen8v3\n"
|
||||
" region: sitename\n"
|
||||
" date: 17-FEB-2017\n"
|
||||
" name: Sample hardware definition\n"
|
||||
" author: Scott Hussey\n"
|
||||
"spec:\n"
|
||||
" # Vendor of the server chassis\n"
|
||||
" vendor: HP\n"
|
||||
" # Generation of the chassis model\n"
|
||||
" generation: '8'\n"
|
||||
" # Version of the chassis model within its generation - not version of the hardware definition\n"
|
||||
" hw_version: '3'\n"
|
||||
" # The certified version of the chassis BIOS\n"
|
||||
" bios_version: '2.2.3'\n"
|
||||
" # Mode of the default boot of hardware - bios, uefi\n"
|
||||
" boot_mode: bios\n"
|
||||
" # Protocol of boot of the hardware - pxe, usb, hdd\n"
|
||||
" bootstrap_protocol: pxe\n"
|
||||
" # Which interface to use for network booting within the OOB manager, not OS device\n"
|
||||
" pxe_interface: 0\n"
|
||||
" # Map hardware addresses to aliases/roles to allow a mix of hardware configs\n"
|
||||
" # in a site to result in a consistent configuration\n"
|
||||
" device_aliases:\n"
|
||||
" pci:\n"
|
||||
" - address: pci@0000:00:03.0\n"
|
||||
" alias: prim_nic01\n"
|
||||
" # type could identify expected hardware - used for hardware manifest validation\n"
|
||||
" type: '82540EM Gigabit Ethernet Controller'\n"
|
||||
" - address: pci@0000:00:04.0\n"
|
||||
" alias: prim_nic02\n"
|
||||
" type: '82540EM Gigabit Ethernet Controller'\n"
|
||||
" scsi:\n"
|
||||
" - address: scsi@2:0.0.0\n"
|
||||
" alias: primary_boot\n"
|
||||
" type: 'VBOX HARDDISK'\n")
|
||||
objects.register_all()
|
||||
|
||||
hw_profile = yaml.load(yaml_snippet)
|
||||
hw_profile_model = HardwareProfile(**hw_profile)
|
||||
model_attr = {
|
||||
'versioned_object.namespace': 'helm_drydock.objects',
|
||||
'versioned_object.name': 'HardwareProfile',
|
||||
'versioned_object.version': '1.0',
|
||||
'versioned_object.data': {
|
||||
'name': 'server',
|
||||
'source': fields.ModelSource.Designed,
|
||||
'site': 'test_site',
|
||||
'vendor': 'Acme',
|
||||
'generation': '9',
|
||||
'hw_version': '3',
|
||||
'bios_version': '2.1.1',
|
||||
'boot_mode': 'bios',
|
||||
'bootstrap_protocol': 'pxe',
|
||||
'pxe_interface': '0',
|
||||
'devices': {
|
||||
'versioned_object.namespace': 'helm_drydock.objects',
|
||||
'versioned_object.name': 'HardwareDeviceAliasList',
|
||||
'versioned_object.version': '1.0',
|
||||
'versioned_object.data': {
|
||||
'objects': [
|
||||
{
|
||||
'versioned_object.namespace': 'helm_drydock.objects',
|
||||
'versioned_object.name': 'HardwareDeviceAlias',
|
||||
'versioned_object.version': '1.0',
|
||||
'versioned_object.data': {
|
||||
'alias': 'nic',
|
||||
'source': fields.ModelSource.Designed,
|
||||
'address': '0000:00:03.0',
|
||||
'bus_type': 'pci',
|
||||
'dev_type': '82540EM Gigabit Ethernet Controller',
|
||||
}
|
||||
},
|
||||
{
|
||||
'versioned_object.namespace': 'helm_drydock.objects',
|
||||
'versioned_object.name': 'HardwareDeviceAlias',
|
||||
'versioned_object.version': '1.0',
|
||||
'versioned_object.data': {
|
||||
'alias': 'bootdisk',
|
||||
'source': fields.ModelSource.Designed,
|
||||
'address': '2:0.0.0',
|
||||
'bus_type': 'scsi',
|
||||
'dev_type': 'SSD',
|
||||
}
|
||||
},
|
||||
]
|
||||
|
||||
assert hasattr(hw_profile_model, 'bootstrap_protocol')
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
hwprofile = objects.HardwareProfile.obj_from_primitive(model_attr)
|
||||
|
||||
assert getattr(hwprofile, 'bootstrap_protocol') == 'pxe'
|
||||
|
||||
hwprofile.bootstrap_protocol = 'network'
|
||||
|
||||
assert 'bootstrap_protocol' in hwprofile.obj_what_changed()
|
||||
assert 'bios_version' not in hwprofile.obj_what_changed()
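
The two assertions above rely on oslo.versionedobjects' dirty-field tracking: assigning to a declared field records it in obj_what_changed(). Continuing from the test above, a short sketch of the standard OVO calls involved (upstream library API, not project code).

hwprofile = objects.HardwareProfile.obj_from_primitive(model_attr)

hwprofile.bootstrap_protocol = 'network'
assert 'bootstrap_protocol' in hwprofile.obj_what_changed()

hwprofile.obj_reset_changes()                 # clear the tracked changes
assert len(hwprofile.obj_what_changed()) == 0

primitive = hwprofile.obj_to_primitive()      # serializable dict form
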
|
||||
|
||||
|
@ -15,15 +15,16 @@
|
||||
#
|
||||
# Generic testing for the orchestrator
|
||||
#
|
||||
|
||||
import helm_drydock.orchestrator as orch
|
||||
import helm_drydock.enum as enum
|
||||
import helm_drydock.statemgmt as statemgmt
|
||||
import helm_drydock.model.task as task
|
||||
import helm_drydock.drivers as drivers
|
||||
import threading
|
||||
import time
|
||||
|
||||
import helm_drydock.orchestrator as orch
|
||||
import helm_drydock.objects.fields as hd_fields
|
||||
import helm_drydock.statemgmt as statemgmt
|
||||
import helm_drydock.objects.task as task
|
||||
import helm_drydock.drivers as drivers
|
||||
|
||||
|
||||
class TestClass(object):
|
||||
|
||||
def test_task_complete(self):
|
||||
@ -31,24 +32,24 @@ class TestClass(object):
|
||||
orchestrator = orch.Orchestrator(state_manager=state_mgr)
|
||||
orch_task = orchestrator.create_task(task.OrchestratorTask,
|
||||
site='default',
|
||||
action=enum.OrchestratorAction.Noop)
|
||||
action=hd_fields.OrchestratorAction.Noop)
|
||||
|
||||
orchestrator.execute_task(orch_task.get_id())
|
||||
|
||||
orch_task = state_mgr.get_task(orch_task.get_id())
|
||||
|
||||
assert orch_task.get_status() == enum.TaskStatus.Complete
|
||||
assert orch_task.get_status() == hd_fields.TaskStatus.Complete
|
||||
|
||||
for t_id in orch_task.subtasks:
|
||||
t = state_mgr.get_task(t_id)
|
||||
assert t.get_status() == enum.TaskStatus.Complete
|
||||
assert t.get_status() == hd_fields.TaskStatus.Complete
|
||||
|
||||
def test_task_termination(self):
|
||||
state_mgr = statemgmt.DesignState()
|
||||
orchestrator = orch.Orchestrator(state_manager=state_mgr)
|
||||
orch_task = orchestrator.create_task(task.OrchestratorTask,
|
||||
site='default',
|
||||
action=enum.OrchestratorAction.Noop)
|
||||
action=hd_fields.OrchestratorAction.Noop)
|
||||
|
||||
orch_thread = threading.Thread(target=orchestrator.execute_task,
|
||||
args=(orch_task.get_id(),))
|
||||
@ -61,8 +62,8 @@ class TestClass(object):
|
||||
time.sleep(1)
|
||||
|
||||
orch_task = state_mgr.get_task(orch_task.get_id())
|
||||
assert orch_task.get_status() == enum.TaskStatus.Terminated
|
||||
assert orch_task.get_status() == hd_fields.TaskStatus.Terminated
|
||||
|
||||
for t_id in orch_task.subtasks:
|
||||
t = state_mgr.get_task(t_id)
|
||||
assert t.get_status() == enum.TaskStatus.Terminated
|
||||
assert t.get_status() == hd_fields.TaskStatus.Terminated
|
@ -21,18 +21,21 @@ import pytest
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import uuid
|
||||
|
||||
from helm_drydock.ingester import Ingester
|
||||
|
||||
import helm_drydock.orchestrator as orch
|
||||
import helm_drydock.enum as enum
|
||||
import helm_drydock.objects.fields as hd_fields
|
||||
import helm_drydock.statemgmt as statemgmt
|
||||
import helm_drydock.model.task as task
|
||||
import helm_drydock.objects as objects
|
||||
import helm_drydock.objects.task as task
|
||||
import helm_drydock.drivers as drivers
|
||||
import helm_drydock.ingester.plugins.yaml as yaml_ingester
|
||||
|
||||
class TestClass(object):
|
||||
|
||||
design_id = str(uuid.uuid4())
|
||||
|
||||
# sthussey None of these work right until I figure out correct
|
||||
# mocking of pyghmi
|
||||
@ -45,7 +48,8 @@ class TestClass(object):
|
||||
|
||||
orch_task = orchestrator.create_task(task.OrchestratorTask,
|
||||
site='sitename',
|
||||
action=enum.OrchestratorAction.VerifyNode)
|
||||
design_id=self.design_id,
|
||||
action=hd_fields.OrchestratorAction.VerifyNode)
|
||||
|
||||
orchestrator.execute_task(orch_task.get_id())
|
||||
|
||||
@ -73,15 +77,19 @@ class TestClass(object):
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def loaded_design(self, input_files):
|
||||
objects.register_all()
|
||||
|
||||
input_file = input_files.join("oob.yaml")
|
||||
|
||||
design_state = statemgmt.DesignState()
|
||||
design_data = statemgmt.SiteDesign()
|
||||
design_state.post_design_base(design_data)
|
||||
design_data = objects.SiteDesign(id=self.design_id)
|
||||
|
||||
design_state.post_design(design_data)
|
||||
|
||||
ingester = Ingester()
|
||||
ingester.enable_plugins([yaml_ingester.YamlIngester])
|
||||
ingester.ingest_data(plugin_name='yaml', design_state=design_state, filenames=[str(input_file)])
|
||||
ingester.ingest_data(plugin_name='yaml', design_state=design_state,
|
||||
design_id=self.design_id, filenames=[str(input_file)])
|
||||
|
||||
return design_state
|
||||
|
||||
|
@ -11,52 +11,38 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from helm_drydock.statemgmt import SiteDesign
|
||||
|
||||
import helm_drydock.model.site as site
|
||||
import helm_drydock.model.network as network
|
||||
|
||||
import pytest
|
||||
import shutil
|
||||
import os
|
||||
import helm_drydock.ingester.plugins.yaml
|
||||
|
||||
|
||||
import helm_drydock.objects as objects
|
||||
import helm_drydock.statemgmt as statemgmt
|
||||
|
||||
class TestClass(object):
|
||||
|
||||
def setup_method(self, method):
|
||||
print("Running test {0}".format(method.__name__))
|
||||
|
||||
def test_sitedesign_merge(self):
|
||||
design_data = SiteDesign()
|
||||
def test_sitedesign_post(self):
|
||||
objects.register_all()
|
||||
|
||||
initial_site = site.Site(**{'apiVersion': 'v1.0',
|
||||
'metadata': {
|
||||
'name': 'testsite',
|
||||
},
|
||||
})
|
||||
net_a = network.Network(**{ 'apiVersion': 'v1.0',
|
||||
'metadata': {
|
||||
'name': 'net_a',
|
||||
'region': 'testsite',
|
||||
},
|
||||
'spec': {
|
||||
'cidr': '172.16.0.0/24',
|
||||
}})
|
||||
net_b = network.Network(**{ 'apiVersion': 'v1.0',
|
||||
'metadata': {
|
||||
'name': 'net_b',
|
||||
'region': 'testsite',
|
||||
},
|
||||
'spec': {
|
||||
'cidr': '172.16.0.1/24',
|
||||
}})
|
||||
state_manager = statemgmt.DesignState()
|
||||
design_data = objects.SiteDesign()
|
||||
design_id = design_data.assign_id()
|
||||
|
||||
design_data.add_site(initial_site)
|
||||
initial_site = objects.Site()
|
||||
initial_site.name = 'testsite'
|
||||
|
||||
net_a = objects.Network()
|
||||
net_a.name = 'net_a'
|
||||
net_a.region = 'testsite'
|
||||
net_a.cidr = '172.16.0.0/24'
|
||||
|
||||
design_data.set_site(initial_site)
|
||||
design_data.add_network(net_a)
|
||||
|
||||
design_update = SiteDesign()
|
||||
design_update.add_network(net_b)
|
||||
state_manager.post_design(design_data)
|
||||
|
||||
design_data.merge_updates(design_update)
|
||||
my_design = state_manager.get_design(design_id)
|
||||
|
||||
assert len(design_data.get_networks()) == 2
|
||||
assert design_data.obj_to_primitive() == my_design.obj_to_primitive()
|
@ -37,11 +37,11 @@ metadata:
|
||||
description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on
|
||||
spec:
|
||||
bonding:
|
||||
mode: none
|
||||
mode: disabled
|
||||
mtu: 1500
|
||||
linkspeed: 100full
|
||||
trunking:
|
||||
mode: none
|
||||
mode: disabled
|
||||
default_network: oob
|
||||
---
|
||||
# pxe is a bit of 'magic' indicating the link config used when PXE booting
|
||||
@ -57,14 +57,14 @@ metadata:
|
||||
description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on
|
||||
spec:
|
||||
bonding:
|
||||
mode: none
|
||||
mode: disabled
|
||||
mtu: 1500
|
||||
linkspeed: auto
|
||||
# Is this link supporting multiple layer 2 networks?
|
||||
# none is a port-based VLAN identified by default_network
|
||||
# tagged is using 802.1q VLAN tagging. Untagged packets will default to default_network
|
||||
trunking:
|
||||
mode: none
|
||||
mode: disabled
|
||||
# use name, will translate to VLAN ID
|
||||
default_network: pxe
|
||||
---
|
||||
@ -91,14 +91,11 @@ spec:
|
||||
hash: layer3+4
|
||||
# 802.3ad specific options
|
||||
peer_rate: slow
|
||||
mon_rate: default
|
||||
up_delay: default
|
||||
down_delay: default
|
||||
mtu: 9000
|
||||
linkspeed: auto
|
||||
# Is this link supporting multiple layer 2 networks?
|
||||
trunking:
|
||||
mode: tagged
|
||||
mode: 802.1q
|
||||
default_network: mgmt
|
||||
---
|
||||
apiVersion: 'v1.0'
|
||||
@ -446,15 +443,17 @@ spec:
|
||||
# Map hardware addresses to aliases/roles to allow a mix of hardware configs
|
||||
# in a site to result in a consistent configuration
|
||||
device_aliases:
|
||||
pci:
|
||||
- address: pci@0000:00:03.0
|
||||
alias: prim_nic01
|
||||
- address: '0000:00:03.0'
|
||||
alias: prim_nic01
|
||||
# type could identify expected hardware - used for hardware manifest validation
|
||||
type: '82540EM Gigabit Ethernet Controller'
|
||||
- address: pci@0000:00:04.0
|
||||
alias: prim_nic02
|
||||
type: '82540EM Gigabit Ethernet Controller'
|
||||
scsi:
|
||||
- address: scsi@2:0.0.0
|
||||
alias: primary_boot
|
||||
type: 'VBOX HARDDISK'
|
||||
dev_type: '82540EM Gigabit Ethernet Controller'
|
||||
bus_type: 'pci'
|
||||
- address: '0000:00:04.0'
|
||||
alias: prim_nic02
|
||||
dev_type: '82540EM Gigabit Ethernet Controller'
|
||||
bus_type: 'pci'
|
||||
- address: '2:0.0.0'
|
||||
alias: primary_boot
|
||||
dev_type: 'VBOX HARDDISK'
|
||||
bus_type: 'scsi'
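
The device alias entries now carry address, alias, dev_type and bus_type directly (the old pci@/scsi@ address prefixes and the single type key are gone), matching the HardwareDeviceAlias fields exercised in the unit test earlier in this change. A hedged sketch of building the first pci entry as an object; it assumes HardwareDeviceAlias is exported from helm_drydock.objects like the other models.

import helm_drydock.objects as objects

objects.register_all()

# Field names taken from the HardwareDeviceAlias primitive in the unit test above.
nic_alias = objects.HardwareDeviceAlias(
    alias='prim_nic01',
    address='0000:00:03.0',
    bus_type='pci',
    dev_type='82540EM Gigabit Ethernet Controller')
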
|
||||
|
||||
|
@ -28,11 +28,11 @@ metadata:
|
||||
description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on
|
||||
spec:
|
||||
bonding:
|
||||
mode: none
|
||||
mode: disabled
|
||||
mtu: 1500
|
||||
linkspeed: 100full
|
||||
trunking:
|
||||
mode: none
|
||||
mode: disabled
|
||||
default_network: oob
|
||||
---
|
||||
# pxe is a bit of 'magic' indicating the link config used when PXE booting
|
||||
@ -48,14 +48,14 @@ metadata:
|
||||
description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on
|
||||
spec:
|
||||
bonding:
|
||||
mode: none
|
||||
mode: disabled
|
||||
mtu: 1500
|
||||
linkspeed: auto
|
||||
# Is this link supporting multiple layer 2 networks?
|
||||
# none is a port-based VLAN identified by default_network
|
||||
# tagged is using 802.1q VLAN tagging. Untagged packets will default to default_network
|
||||
trunking:
|
||||
mode: none
|
||||
mode: disabled
|
||||
# use name, will translate to VLAN ID
|
||||
default_network: pxe
|
||||
---
|
||||
@ -82,14 +82,11 @@ spec:
|
||||
hash: layer3+4
|
||||
# 802.3ad specific options
|
||||
peer_rate: slow
|
||||
mon_rate: default
|
||||
up_delay: default
|
||||
down_delay: default
|
||||
mtu: 9000
|
||||
linkspeed: auto
|
||||
# Is this link supporting multiple layer 2 networks?
|
||||
trunking:
|
||||
mode: tagged
|
||||
mode: 802.1q
|
||||
default_network: mgmt
|
||||
---
|
||||
apiVersion: 'v1.0'
|
||||
|
@ -48,7 +48,8 @@ spec:
|
||||
- network: public
|
||||
address: 172.16.3.20
|
||||
metadata:
|
||||
roles: os_ctl
|
||||
tags:
|
||||
- os_ctl
|
||||
rack: rack01
|
||||
---
|
||||
apiVersion: 'v1.0'
|
||||
|
@ -182,15 +182,16 @@ spec:
|
||||
# Map hardware addresses to aliases/roles to allow a mix of hardware configs
|
||||
# in a site to result in a consistent configuration
|
||||
device_aliases:
|
||||
pci:
|
||||
- address: pci@0000:00:03.0
|
||||
alias: prim_nic01
|
||||
- address: 0000:00:03.0
|
||||
alias: prim_nic01
|
||||
# type could identify expected hardware - used for hardware manifest validation
|
||||
type: '82540EM Gigabit Ethernet Controller'
|
||||
- address: pci@0000:00:04.0
|
||||
alias: prim_nic02
|
||||
type: '82540EM Gigabit Ethernet Controller'
|
||||
scsi:
|
||||
- address: scsi@2:0.0.0
|
||||
alias: primary_boot
|
||||
type: 'VBOX HARDDISK'
|
||||
dev_type: '82540EM Gigabit Ethernet Controller'
|
||||
bus_type: 'pci'
|
||||
- address: 0000:00:04.0
|
||||
alias: prim_nic02
|
||||
dev_type: '82540EM Gigabit Ethernet Controller'
|
||||
bus_type: 'pci'
|
||||
- address: 2:0.0.0
|
||||
alias: primary_boot
|
||||
dev_type: 'VBOX HARDDISK'
|
||||
bus_type: 'scsi'
|
||||
|
@ -10,11 +10,11 @@ metadata:
|
||||
description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on
|
||||
spec:
|
||||
bonding:
|
||||
mode: none
|
||||
mode: disabled
|
||||
mtu: 1500
|
||||
linkspeed: 100full
|
||||
trunking:
|
||||
mode: none
|
||||
mode: disabled
|
||||
default_network: oob
|
||||
---
|
||||
# pxe is a bit of 'magic' indicating the link config used when PXE booting
|
||||
@ -31,14 +31,14 @@ metadata:
|
||||
description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on
|
||||
spec:
|
||||
bonding:
|
||||
mode: none
|
||||
mode: disabled
|
||||
mtu: 1500
|
||||
linkspeed: auto
|
||||
# Is this link supporting multiple layer 2 networks?
|
||||
# none is a port-based VLAN identified by default_network
|
||||
# tagged is using 802.1q VLAN tagging. Untagged packets will default to default_network
|
||||
trunking:
|
||||
mode: none
|
||||
mode: disabled
|
||||
# use name, will translate to VLAN ID
|
||||
default_network: pxe
|
||||
---
|
||||
@ -61,17 +61,14 @@ spec:
|
||||
# balance-rr
|
||||
# Can add support for others down the road
|
||||
bonding:
|
||||
mode: 802.3ad
|
||||
mode: '802.3ad'
|
||||
# For LACP (802.3ad) xmit hashing policy: layer2, layer2+3, layer3+4, encap3+4
|
||||
hash: layer3+4
|
||||
# 802.3ad specific options
|
||||
peer_rate: slow
|
||||
mon_rate: default
|
||||
up_delay: default
|
||||
down_delay: default
|
||||
mtu: 9000
|
||||
linkspeed: auto
|
||||
# Is this link supporting multiple layer 2 networks?
|
||||
trunking:
|
||||
mode: tagged
|
||||
mode: '802.1q'
|
||||
default_network: mgmt
|
@ -37,11 +37,11 @@ metadata:
|
||||
description: Describe layer 1 attributes. Primary key is 'name'. These settings will generally be things the switch and server have to agree on
|
||||
spec:
|
||||
bonding:
|
||||
mode: none
|
||||
mode: disabled
|
||||
mtu: 1500
|
||||
linkspeed: 100full
|
||||
trunking:
|
||||
mode: none
|
||||
mode: disabled
|
||||
default_network: oob
|
||||
---
|
||||
apiVersion: 'v1.0'
|
||||
@ -212,15 +212,16 @@ spec:
|
||||
# Map hardware addresses to aliases/roles to allow a mix of hardware configs
|
||||
# in a site to result in a consistent configuration
|
||||
device_aliases:
|
||||
pci:
|
||||
- address: pci@0000:00:03.0
|
||||
alias: prim_nic01
|
||||
# type could identify expected hardware - used for hardware manifest validation
|
||||
type: '82540EM Gigabit Ethernet Controller'
|
||||
- address: pci@0000:00:04.0
|
||||
alias: prim_nic02
|
||||
type: '82540EM Gigabit Ethernet Controller'
|
||||
scsi:
|
||||
- address: scsi@2:0.0.0
|
||||
alias: primary_boot
|
||||
type: 'VBOX HARDDISK'
|
||||
- address: 0000:00:03.0
|
||||
alias: prim_nic01
|
||||
# type could identify expected hardware - used for hardware manifest validation
|
||||
dev_type: '82540EM Gigabit Ethernet Controller'
|
||||
bus_type: 'pci'
|
||||
- address: 0000:00:04.0
|
||||
alias: prim_nic02
|
||||
dev_type: '82540EM Gigabit Ethernet Controller'
|
||||
bus_type: 'pci'
|
||||
- address: 2:0.0.0
|
||||
alias: primary_boot
|
||||
dev_type: 'VBOX HARDDISK'
|
||||
bus_type: 'scsi'
|
||||
|
@ -25,15 +25,16 @@ spec:
|
||||
# Map hardware addresses to aliases/roles to allow a mix of hardware configs
|
||||
# in a site to result in a consistent configuration
|
||||
device_aliases:
|
||||
pci:
|
||||
- address: pci@0000:00:03.0
|
||||
alias: prim_nic01
|
||||
- address: 0000:00:03.0
|
||||
alias: prim_nic01
|
||||
# type could identify expected hardware - used for hardware manifest validation
|
||||
type: '82540EM Gigabit Ethernet Controller'
|
||||
- address: pci@0000:00:04.0
|
||||
alias: prim_nic02
|
||||
type: '82540EM Gigabit Ethernet Controller'
|
||||
scsi:
|
||||
- address: scsi@2:0.0.0
|
||||
alias: primary_boot
|
||||
type: 'VBOX HARDDISK'
|
||||
dev_type: '82540EM Gigabit Ethernet Controller'
|
||||
bus_type: 'pci'
|
||||
- address: 0000:00:04.0
|
||||
alias: prim_nic02
|
||||
dev_type: '82540EM Gigabit Ethernet Controller'
|
||||
bus_type: 'pci'
|
||||
- address: 2:0.0.0
|
||||
alias: primary_boot
|
||||
dev_type: 'VBOX HARDDISK'
|
||||
bus_type: 'scsi'
|