Fix Tempest test failures caused by notification handling issues
Change-Id: I33a0764060600b8e3d6bec757669490b9003b345
This commit is contained in:
parent 74989fe94e
commit 72e6564549
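
At a glance, this change renames the cluster-model accessors from *_from_id to *_by_uuid and replaces silent failures with typed exceptions. A condensed map of the renames, as they appear in the hunks below:

    get_node_from_id              -> get_node_by_uuid
    get_instance_from_id          -> get_instance_by_uuid
    get_resource_from_id          -> get_resource_by_uuid
    get_node_from_instance_id     -> get_node_by_instance_uuid
    get_node_instances_from_id    -> get_node_instances_by_uuid
    get_capacity_from_id          -> get_capacity_by_uuid
    unmap_from_id                 -> unmap_by_uuid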
@@ -336,6 +336,10 @@ class ClusterStateNotDefined(WatcherException):
    msg_fmt = _("The cluster state is not defined")


class CapacityNotDefined(WatcherException):
    msg_fmt = _("The capacity %(capacity)s is not defined for '%(resource)s'")


class NoAvailableStrategyForGoal(WatcherException):
    msg_fmt = _("No strategy could be found to achieve the '%(goal)s' goal.")
@@ -26,6 +26,8 @@ import cinderclient.exceptions as ciexceptions
import novaclient.exceptions as nvexceptions

from watcher.common import clients
from watcher.common import exception
from watcher.common import utils

LOG = log.getLogger(__name__)
@@ -43,6 +45,24 @@ class NovaHelper(object):
    def get_compute_node_list(self):
        return self.nova.hypervisors.list()

    def get_compute_node_by_id(self, node_id):
        """Get compute node by ID (*not* UUID)"""
        # We need to pass an object with an 'id' attribute to make it work
        return self.nova.hypervisors.get(utils.Struct(id=node_id))

    def get_compute_node_by_hostname(self, node_hostname):
        """Get compute node by hostname (*not* UUID)"""
        # We need to pass an object with an 'id' attribute to make it work
        try:
            compute_nodes = self.nova.hypervisors.search(node_hostname)
            if len(compute_nodes) != 1:
                raise exception.ComputeNodeNotFound(name=node_hostname)

            return self.get_compute_node_by_id(compute_nodes[0].id)
        except Exception as exc:
            LOG.exception(exc)
            raise exception.ComputeNodeNotFound(name=node_hostname)

    def find_instance(self, instance_id):
        search_opts = {'all_tenants': True}
        instances = self.nova.servers.list(detailed=True,
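The hypervisors.get() call above needs an object exposing an id attribute rather than a bare ID, hence the utils.Struct wrapper. A minimal usage sketch, assuming a running cloud and a hypervisor registered under the made-up hostname 'compute-1':

    from watcher.common import nova_helper

    helper = nova_helper.NovaHelper()
    # Resolves the hostname to exactly one hypervisor, then fetches its
    # full record by ID; raises ComputeNodeNotFound on zero or many matches.
    node = helper.get_compute_node_by_hostname('compute-1')
    print(node.hypervisor_hostname, node.vcpus, node.memory_mb)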
@@ -83,7 +83,7 @@ class NovaClusterDataModelCollector(base.BaseClusterDataModelCollector):
        for n in nodes:
            service = self.wrapper.nova.services.find(id=n.service['id'])
            # create node in cluster_model_collector
            node = element.ComputeNode()
            node = element.ComputeNode(n.id)
            node.uuid = service.host
            node.hostname = n.hypervisor_hostname
            # set capacity
@@ -105,7 +105,10 @@ class NovaClusterDataModelCollector(base.BaseClusterDataModelCollector):
            # set capacity
            self.wrapper.get_flavor_instance(v, flavor_cache)
            mem.set_capacity(instance, v.flavor['ram'])
            # FIXME: update all strategies to use disk_capacity
            # for instances instead of disk
            disk.set_capacity(instance, v.flavor['disk'])
            disk_capacity.set_capacity(instance, v.flavor['disk'])
            num_cores.set_capacity(instance, v.flavor['vcpus'])

            model.map_instance(instance, node)
@@ -28,8 +28,9 @@ class ServiceState(enum.Enum):

class ComputeNode(compute_resource.ComputeResource):

    def __init__(self):
    def __init__(self, id):
        super(ComputeNode, self).__init__()
        self.id = id
        self._state = ServiceState.ONLINE.value
        self._status = ServiceState.ENABLED.value
@@ -16,6 +16,8 @@

import enum

from watcher.common import exception


class ResourceType(enum.Enum):
    cpu_cores = 'num_cores'
@@ -50,12 +52,12 @@ class Resource(object):
    def unset_capacity(self, element):
        del self.mapping[element.uuid]

    def get_capacity_from_id(self, uuid):
        if str(uuid) in self.mapping.keys():
    def get_capacity_by_uuid(self, uuid):
        try:
            return self.mapping[str(uuid)]
        else:
            # TODO(jed) throw exception
            return None
        except KeyError:
            raise exception.CapacityNotDefined(
                capacity=self.name.value, resource=str(uuid))

    def get_capacity(self, element):
        return self.get_capacity_from_id(element.uuid)
        return self.get_capacity_by_uuid(element.uuid)
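Callers that previously checked for a None return now have to handle the exception instead. A sketch of the migrated call pattern, with placeholder names:

    from watcher.common import exception

    try:
        capacity = resource.get_capacity_by_uuid(instance_uuid)
    except exception.CapacityNotDefined:
        # Previously get_capacity_from_id() silently returned None here.
        capacity = 0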
@@ -58,9 +58,9 @@ class Mapping(object):
        :param node: the node
        :param instance: the virtual machine or instance
        """
        self.unmap_from_id(node.uuid, instance.uuid)
        self.unmap_by_uuid(node.uuid, instance.uuid)

    def unmap_from_id(self, node_uuid, instance_uuid):
    def unmap_by_uuid(self, node_uuid, instance_uuid):
        """Remove the instance (by id) from the node (by id)

        :rtype : object
@@ -84,15 +84,15 @@ class Mapping(object):
        return self.compute_node_mapping

    def get_node_from_instance(self, instance):
        return self.get_node_from_instance_id(instance.uuid)
        return self.get_node_by_instance_uuid(instance.uuid)

    def get_node_from_instance_id(self, instance_uuid):
    def get_node_by_instance_uuid(self, instance_uuid):
        """Getting host information from the guest instance

        :param instance: the uuid of the instance
        :return: node
        """
        return self.model.get_node_from_id(
        return self.model.get_node_by_uuid(
            self.instance_mapping[str(instance_uuid)])

    def get_node_instances(self, node):
@@ -101,9 +101,9 @@ class Mapping(object):
        :param node:
        :return:
        """
        return self.get_node_instances_from_id(node.uuid)
        return self.get_node_instances_by_uuid(node.uuid)

    def get_node_instances_from_id(self, node_uuid):
    def get_node_instances_by_uuid(self, node_uuid):
        if str(node_uuid) in self.compute_node_mapping.keys():
            return self.compute_node_mapping[str(node_uuid)]
        else:
@@ -77,9 +77,9 @@ class ModelRoot(object):
        :type node: str or :py:class:`~.Instance`
        """
        if isinstance(instance, six.string_types):
            instance = self.get_instance_from_id(instance)
            instance = self.get_instance_by_uuid(instance)
        if isinstance(node, six.string_types):
            node = self.get_node_from_id(node)
            node = self.get_node_by_uuid(node)

        self.add_instance(instance)
        self.mapping.map(node, instance)
@@ -93,17 +93,18 @@ class ModelRoot(object):
        :type node: str or :py:class:`~.Instance`
        """
        if isinstance(instance, six.string_types):
            instance = self.get_instance_from_id(instance)
            instance = self.get_instance_by_uuid(instance)
        if isinstance(node, six.string_types):
            node = self.get_node_from_id(node)
            node = self.get_node_by_uuid(node)

        self.add_instance(instance)
        self.mapping.unmap(node, instance)

    def delete_instance(self, instance, node):
        self.remove_instance(instance)
    def delete_instance(self, instance, node=None):
        if node is not None:
            self.mapping.unmap(node, instance)

        self.mapping.unmap(node, instance)
        self.remove_instance(instance)

        for resource in self.resource.values():
            try:
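With node now optional, an instance can be removed even when its host is unknown, which is exactly the case the notification handlers below hit when the host cannot be resolved. A sketch under that assumption:

    # Node known: unmap first, then drop the instance and its capacities.
    model.delete_instance(instance, node)
    # Node unknown (e.g. the host lookup failed): drop the instance only.
    model.delete_instance(instance)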
@@ -130,17 +131,17 @@ class ModelRoot(object):
    def get_all_compute_nodes(self):
        return self._nodes

    def get_node_from_id(self, node_uuid):
    def get_node_by_uuid(self, node_uuid):
        if str(node_uuid) not in self._nodes:
            raise exception.ComputeNodeNotFound(name=node_uuid)
        return self._nodes[str(node_uuid)]

    def get_instance_from_id(self, uuid):
    def get_instance_by_uuid(self, uuid):
        if str(uuid) not in self._instances:
            raise exception.InstanceNotFound(name=uuid)
        return self._instances[str(uuid)]

    def get_node_from_instance_id(self, instance_uuid):
    def get_node_by_instance_uuid(self, instance_uuid):
        """Getting host information from the guest instance

        :param instance_uuid: the uuid of the instance
@@ -148,7 +149,7 @@ class ModelRoot(object):
        """
        if str(instance_uuid) not in self.mapping.instance_mapping:
            raise exception.InstanceNotFound(name=instance_uuid)
        return self.get_node_from_id(
        return self.get_node_by_uuid(
            self.mapping.instance_mapping[str(instance_uuid)])

    def get_all_instances(self):
@@ -160,7 +161,7 @@ class ModelRoot(object):
    def create_resource(self, r):
        self.resource[str(r.name)] = r

    def get_resource_from_id(self, resource_id):
    def get_resource_by_uuid(self, resource_id):
        return self.resource[str(resource_id)]

    def get_node_instances(self, node):
@@ -168,11 +169,12 @@ class ModelRoot(object):

    def _build_compute_node_element(self, compute_node):
        attrib = collections.OrderedDict(
            uuid=compute_node.uuid, human_id=compute_node.human_id,
            hostname=compute_node.hostname, state=compute_node.state,
            status=compute_node.status)
            id=six.text_type(compute_node.id), uuid=compute_node.uuid,
            human_id=compute_node.human_id, hostname=compute_node.hostname,
            state=compute_node.state, status=compute_node.status)

        for resource_name, resource in self.resource.items():
        for resource_name, resource in sorted(
                self.resource.items(), key=lambda x: x[0]):
            res_value = resource.get_capacity(compute_node)
            if res_value is not None:
                attrib[resource_name] = six.text_type(res_value)
@@ -186,7 +188,8 @@ class ModelRoot(object):
            uuid=instance.uuid, human_id=instance.human_id,
            hostname=instance.hostname, state=instance.state)

        for resource_name, resource in self.resource.items():
        for resource_name, resource in sorted(
                self.resource.items(), key=lambda x: x[0]):
            res_value = resource.get_capacity(instance)
            if res_value is not None:
                attrib[resource_name] = six.text_type(res_value)
@@ -205,7 +208,7 @@ class ModelRoot(object):
            # Build mapped instance tree
            node_instance_uuids = self.get_node_instances(cn)
            for instance_uuid in sorted(node_instance_uuids):
                instance = self.get_instance_from_id(instance_uuid)
                instance = self.get_instance_by_uuid(instance_uuid)
                instance_el = self._build_instance_element(instance)
                compute_node_el.append(instance_el)
@@ -215,7 +218,7 @@ class ModelRoot(object):
        for instance in sorted(self.get_all_instances().values(),
                               key=lambda inst: inst.uuid):
            try:
                self.get_node_from_instance_id(instance.uuid)
                self.get_node_by_instance_uuid(instance.uuid)
            except exception.InstanceNotFound:
                root.append(self._build_instance_element(instance))
@@ -20,6 +20,7 @@ from oslo_log import log

from watcher._i18n import _LI
from watcher.common import exception
from watcher.common import nova_helper
from watcher.decision_engine.model import element
from watcher.decision_engine.model.notification import base
from watcher.decision_engine.model.notification import filtering
@@ -29,9 +30,19 @@ LOG = log.getLogger(__name__)

class NovaNotification(base.NotificationEndpoint):

    def __init__(self, collector):
        super(NovaNotification, self).__init__(collector)
        self._nova = None

    @property
    def nova(self):
        if self._nova is None:
            self._nova = nova_helper.NovaHelper()
        return self._nova

    def get_or_create_instance(self, uuid):
        try:
            instance = self.cluster_data_model.get_instance_from_id(uuid)
            instance = self.cluster_data_model.get_instance_by_uuid(uuid)
        except exception.InstanceNotFound:
            # The instance didn't exist yet so we create a new instance object
            LOG.debug("New instance created: %s", uuid)
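The new nova property builds the NovaHelper client lazily, so an endpoint only touches the Nova API the first time a notification actually needs it. The pattern in isolation, as a sketch:

    class LazyClientHolder(object):
        def __init__(self):
            self._nova = None

        @property
        def nova(self):
            # Construct the client on first access and cache it afterwards.
            if self._nova is None:
                self._nova = nova_helper.NovaHelper()
            return self._nova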
@@ -59,13 +70,20 @@ class NovaNotification(base.NotificationEndpoint):
            element.ResourceType.cpu_cores, instance, num_cores)
        self.update_capacity(
            element.ResourceType.disk, instance, disk_gb)
        self.update_capacity(
            element.ResourceType.disk_capacity, instance, disk_gb)

        node = self.get_or_create_node(instance_data['host'])
        try:
            node = self.get_or_create_node(instance_data['host'])
        except exception.ComputeNodeNotFound as exc:
            LOG.exception(exc)
            # If we can't create the node, we consider the instance as unmapped
            node = None

        self.update_instance_mapping(instance, node)

    def update_capacity(self, resource_id, obj, value):
        resource = self.cluster_data_model.get_resource_from_id(resource_id)
        resource = self.cluster_data_model.get_resource_by_uuid(resource_id)
        resource.set_capacity(obj, value)

    def legacy_update_instance(self, instance, data):
@@ -82,34 +100,83 @@ class NovaNotification(base.NotificationEndpoint):
            element.ResourceType.cpu_cores, instance, num_cores)
        self.update_capacity(
            element.ResourceType.disk, instance, disk_gb)
        self.update_capacity(
            element.ResourceType.disk_capacity, instance, disk_gb)

        node = self.get_or_create_node(data['host'])
        try:
            node = self.get_or_create_node(data['host'])
        except exception.ComputeNodeNotFound as exc:
            LOG.exception(exc)
            # If we can't create the node, we consider the instance as unmapped
            node = None

        self.update_instance_mapping(instance, node)

    def update_compute_node(self, node, data):
        """Update the compute node using the notification data."""
        node_data = data['nova_object.data']
        node.hostname = node_data['host']
        node.state = (
            element.ServiceState.OFFLINE.value
            if node_data['forced_down'] else element.ServiceState.ONLINE.value)
        node.status = (
            element.ServiceState.DISABLED.value
            if node_data['host'] else element.ServiceState.ENABLED.value)

    def create_compute_node(self, node_hostname):
        """Update the compute node by querying the Nova API."""
        try:
            _node = self.nova.get_compute_node_by_hostname(node_hostname)
            node = element.ComputeNode(_node.id)
            node.uuid = node_hostname
            node.hostname = _node.hypervisor_hostname
            node.state = _node.state
            node.status = _node.status

            self.update_capacity(
                element.ResourceType.memory, node, _node.memory_mb)
            self.update_capacity(
                element.ResourceType.cpu_cores, node, _node.vcpus)
            self.update_capacity(
                element.ResourceType.disk, node, _node.free_disk_gb)
            self.update_capacity(
                element.ResourceType.disk_capacity, node, _node.local_gb)
            return node
        except Exception as exc:
            LOG.exception(exc)
            LOG.debug("Could not refresh the node %s.", node_hostname)
            raise exception.ComputeNodeNotFound(name=node_hostname)

        return False

    def get_or_create_node(self, uuid):
        if uuid is None:
            LOG.debug("Compute node UUID not provided: skipping")
            return
        try:
            node = self.cluster_data_model.get_node_from_id(uuid)
            return self.cluster_data_model.get_node_by_uuid(uuid)
        except exception.ComputeNodeNotFound:
            # The node didn't exist yet so we create a new node object
            node = self.create_compute_node(uuid)
            LOG.debug("New compute node created: %s", uuid)
            node = element.ComputeNode()
            node.uuid = uuid

            self.cluster_data_model.add_node(node)

        return node
            return node

    def update_instance_mapping(self, instance, node):
        if not node:
        if node is None:
            self.cluster_data_model.add_instance(instance)
            LOG.debug("Instance %s not yet attached to any node: skipping",
                      instance.uuid)
            return
        try:
            old_node = self.get_or_create_node(node.uuid)
        try:
            old_node = self.get_or_create_node(node.uuid)
        except exception.ComputeNodeNotFound as exc:
            LOG.exception(exc)
            # If we can't create the node,
            # we consider the instance as unmapped
            old_node = None

        LOG.debug("Mapped node %s found", node.uuid)
        if node and node != old_node:
            LOG.debug("Unmapping instance %s from %s",
@@ -126,8 +193,7 @@ class NovaNotification(base.NotificationEndpoint):
    def delete_instance(self, instance, node):
        try:
            self.cluster_data_model.delete_instance(instance, node)
        except Exception as exc:
            LOG.exception(exc)
        except Exception:
            LOG.info(_LI("Instance %s already deleted"), instance.uuid)
@@ -150,19 +216,18 @@ class ServiceUpdated(VersionnedNotificationEndpoint):
    )

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        LOG.info(_LI("Event '%(event)s' received from %(publisher)s") %
                 dict(event=event_type, publisher=publisher_id))
        LOG.info(_LI("Event '%(event)s' received from %(publisher)s "
                     "with metadata %(metadata)s") %
                 dict(event=event_type,
                      publisher=publisher_id,
                      metadata=metadata))
        node_data = payload['nova_object.data']
        node_uuid = node_data['host']
        node = self.get_or_create_node(node_uuid)

        node.hostname = node_data['host']
        node.state = (
            element.ServiceState.OFFLINE.value
            if node_data['forced_down'] else element.ServiceState.ONLINE.value)
        node.status = (
            element.ServiceState.DISABLED.value
            if node_data['host'] else element.ServiceState.ENABLED.value)
        try:
            node = self.get_or_create_node(node_uuid)
            self.update_compute_node(node, payload)
        except exception.ComputeNodeNotFound as exc:
            LOG.exception(exc)


class InstanceCreated(VersionnedNotificationEndpoint):
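After this refactoring, a versioned service.update notification flows through two helpers instead of inline attribute updates; roughly:

    node_data = payload['nova_object.data']
    try:
        node = self.get_or_create_node(node_data['host'])  # may call Nova
        self.update_compute_node(node, payload)            # refresh state/status
    except exception.ComputeNodeNotFound as exc:
        LOG.exception(exc)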
@@ -192,8 +257,11 @@ class InstanceCreated(VersionnedNotificationEndpoint):
    )

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        LOG.info(_LI("Event '%(event)s' received from %(publisher)s") %
                 dict(event=event_type, publisher=publisher_id))
        LOG.info(_LI("Event '%(event)s' received from %(publisher)s "
                     "with metadata %(metadata)s") %
                 dict(event=event_type,
                      publisher=publisher_id,
                      metadata=metadata))
        instance_data = payload['nova_object.data']

        instance_uuid = instance_data['uuid']
@@ -221,8 +289,11 @@ class InstanceUpdated(VersionnedNotificationEndpoint):
    )

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        LOG.info(_LI("Event '%(event)s' received from %(publisher)s") %
                 dict(event=event_type, publisher=publisher_id))
        LOG.info(_LI("Event '%(event)s' received from %(publisher)s "
                     "with metadata %(metadata)s") %
                 dict(event=event_type,
                      publisher=publisher_id,
                      metadata=metadata))
        instance_data = payload['nova_object.data']
        instance_uuid = instance_data['uuid']
        instance = self.get_or_create_instance(instance_uuid)
@@ -241,14 +312,22 @@ class InstanceDeletedEnd(VersionnedNotificationEndpoint):
    )

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        LOG.info(_LI("Event '%(event)s' received from %(publisher)s") %
                 dict(event=event_type, publisher=publisher_id))
        LOG.info(_LI("Event '%(event)s' received from %(publisher)s "
                     "with metadata %(metadata)s") %
                 dict(event=event_type,
                      publisher=publisher_id,
                      metadata=metadata))

        instance_data = payload['nova_object.data']
        instance_uuid = instance_data['uuid']
        instance = self.get_or_create_instance(instance_uuid)

        node = self.get_or_create_node(instance_data['host'])
        try:
            node = self.get_or_create_node(instance_data['host'])
        except exception.ComputeNodeNotFound as exc:
            LOG.exception(exc)
            # If we can't create the node, we consider the instance as unmapped
            node = None

        self.delete_instance(instance, node)
@@ -264,8 +343,11 @@ class LegacyInstanceUpdated(UnversionnedNotificationEndpoint):
    )

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        LOG.info(_LI("Event '%(event)s' received from %(publisher)s") %
                 dict(event=event_type, publisher=publisher_id))
        LOG.info(_LI("Event '%(event)s' received from %(publisher)s "
                     "with metadata %(metadata)s") %
                 dict(event=event_type,
                      publisher=publisher_id,
                      metadata=metadata))

        instance_uuid = payload['instance_id']
        instance = self.get_or_create_instance(instance_uuid)
@@ -284,8 +366,11 @@ class LegacyInstanceCreatedEnd(UnversionnedNotificationEndpoint):
    )

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        LOG.info(_LI("Event '%(event)s' received from %(publisher)s") %
                 dict(event=event_type, publisher=publisher_id))
        LOG.info(_LI("Event '%(event)s' received from %(publisher)s "
                     "with metadata %(metadata)s") %
                 dict(event=event_type,
                      publisher=publisher_id,
                      metadata=metadata))

        instance_uuid = payload['instance_id']
        instance = self.get_or_create_instance(instance_uuid)
@@ -304,12 +389,20 @@ class LegacyInstanceDeletedEnd(UnversionnedNotificationEndpoint):
    )

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        LOG.info(_LI("Event '%(event)s' received from %(publisher)s") %
                 dict(event=event_type, publisher=publisher_id))
        LOG.info(_LI("Event '%(event)s' received from %(publisher)s "
                     "with metadata %(metadata)s") %
                 dict(event=event_type,
                      publisher=publisher_id,
                      metadata=metadata))
        instance_uuid = payload['instance_id']
        instance = self.get_or_create_instance(instance_uuid)

        node = self.get_or_create_node(payload['host'])
        try:
            node = self.get_or_create_node(payload['host'])
        except exception.ComputeNodeNotFound as exc:
            LOG.exception(exc)
            # If we can't create the node, we consider the instance as unmapped
            node = None

        self.delete_instance(instance, node)
@@ -325,8 +418,11 @@ class LegacyLiveMigratedEnd(UnversionnedNotificationEndpoint):
    )

    def info(self, ctxt, publisher_id, event_type, payload, metadata):
        LOG.info(_LI("Event '%(event)s' received from %(publisher)s") %
                 dict(event=event_type, publisher=publisher_id))
        LOG.info(_LI("Event '%(event)s' received from %(publisher)s "
                     "with metadata %(metadata)s") %
                 dict(event=event_type,
                      publisher=publisher_id,
                      metadata=metadata))

        instance_uuid = payload['instance_id']
        instance = self.get_or_create_instance(instance_uuid)
@@ -152,16 +152,16 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
        total_cores = 0
        total_disk = 0
        total_mem = 0
        cpu_capacity = self.compute_model.get_resource_from_id(
        cpu_capacity = self.compute_model.get_resource_by_uuid(
            element.ResourceType.cpu_cores)
        disk_capacity = self.compute_model.get_resource_from_id(
        disk_capacity = self.compute_model.get_resource_by_uuid(
            element.ResourceType.disk)
        memory_capacity = self.compute_model.get_resource_from_id(
        memory_capacity = self.compute_model.get_resource_by_uuid(
            element.ResourceType.memory)

        for instance_id in self.compute_model. \
                get_mapping().get_node_instances(destination_node):
            instance = self.compute_model.get_instance_from_id(instance_id)
        for instance_id in self.compute_model.mapping.get_node_instances(
                destination_node):
            instance = self.compute_model.get_instance_by_uuid(instance_id)
            total_cores += cpu_capacity.get_capacity(instance)
            total_disk += disk_capacity.get_capacity(instance)
            total_mem += memory_capacity.get_capacity(instance)
@@ -188,11 +188,11 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
        :param total_mem: total memory used by the virtual machine
        :return: True if the threshold is not exceed
        """
        cpu_capacity = self.compute_model.get_resource_from_id(
        cpu_capacity = self.compute_model.get_resource_by_uuid(
            element.ResourceType.cpu_cores).get_capacity(destination_node)
        disk_capacity = self.compute_model.get_resource_from_id(
        disk_capacity = self.compute_model.get_resource_by_uuid(
            element.ResourceType.disk).get_capacity(destination_node)
        memory_capacity = self.compute_model.get_resource_from_id(
        memory_capacity = self.compute_model.get_resource_by_uuid(
            element.ResourceType.memory).get_capacity(destination_node)

        return (cpu_capacity >= total_cores * self.threshold_cores and
@@ -219,13 +219,13 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
        :param total_memory_used:
        :return:
        """
        cpu_capacity = self.compute_model.get_resource_from_id(
        cpu_capacity = self.compute_model.get_resource_by_uuid(
            element.ResourceType.cpu_cores).get_capacity(compute_resource)

        disk_capacity = self.compute_model.get_resource_from_id(
        disk_capacity = self.compute_model.get_resource_by_uuid(
            element.ResourceType.disk).get_capacity(compute_resource)

        memory_capacity = self.compute_model.get_resource_from_id(
        memory_capacity = self.compute_model.get_resource_by_uuid(
            element.ResourceType.memory).get_capacity(compute_resource)

        score_cores = (1 - (float(cpu_capacity) - float(total_cores_used)) /
@@ -266,7 +266,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
                metric_name=self.HOST_CPU_USAGE_METRIC_NAME))
            host_avg_cpu_util = 100

        cpu_capacity = self.compute_model.get_resource_from_id(
        cpu_capacity = self.compute_model.get_resource_by_uuid(
            element.ResourceType.cpu_cores).get_capacity(node)

        total_cores_used = cpu_capacity * (host_avg_cpu_util / 100.0)
@@ -306,7 +306,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
                metric_name=self.INSTANCE_CPU_USAGE_METRIC_NAME))
            instance_cpu_utilization = 100

        cpu_capacity = self.compute_model.get_resource_from_id(
        cpu_capacity = self.compute_model.get_resource_by_uuid(
            element.ResourceType.cpu_cores).get_capacity(instance)

        total_cores_used = cpu_capacity * (instance_cpu_utilization / 100.0)
@@ -334,8 +334,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
    def score_of_nodes(self, score):
        """Calculate score of nodes based on load by VMs"""
        for node in self.compute_model.get_all_compute_nodes().values():
            count = self.compute_model.mapping.get_node_instances_from_id(
                node.uuid)
            count = self.compute_model.mapping.get_node_instances(node)
            if len(count) > 0:
                result = self.calculate_score_node(node)
            else:
@@ -348,13 +347,12 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
    def node_and_instance_score(self, sorted_score, score):
        """Get List of VMs from node"""
        node_to_release = sorted_score[len(score) - 1][0]
        instances_to_migrate = (
            self.compute_model.mapping.get_node_instances_from_id(
                node_to_release))
        instances_to_migrate = self.compute_model.mapping.get_node_instances(
            self.compute_model.get_node_by_uuid(node_to_release))

        instance_score = []
        for instance_id in instances_to_migrate:
            instance = self.compute_model.get_instance_from_id(instance_id)
            instance = self.compute_model.get_instance_by_uuid(instance_id)
            if instance.state == element.InstanceState.ACTIVE.value:
                instance_score.append(
                    (instance_id, self.calculate_score_instance(instance)))
@@ -370,7 +368,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
            mig_source_node.uuid,
            mig_destination_node.uuid)

        if len(self.compute_model.get_mapping().get_node_instances(
        if len(self.compute_model.mapping.get_node_instances(
                mig_source_node)) == 0:
            self.add_change_service_state(mig_source_node.
                                          uuid,
@@ -382,11 +380,11 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
        number_migrations = 0
        for instance in sorted_instances:
            for j in range(0, len(sorted_score)):
                mig_instance = self.compute_model.get_instance_from_id(
                mig_instance = self.compute_model.get_instance_by_uuid(
                    instance[0])
                mig_source_node = self.compute_model.get_node_from_id(
                mig_source_node = self.compute_model.get_node_by_uuid(
                    node_to_release)
                mig_destination_node = self.compute_model.get_node_from_id(
                mig_destination_node = self.compute_model.get_node_by_uuid(
                    sorted_score[j][0])

                result = self.check_migration(
@@ -411,6 +409,8 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
        if not self.compute_model:
            raise exception.ClusterStateNotDefined()

        LOG.debug(self.compute_model.to_string())

    def do_execute(self):
        # todo(jed) clone model
        self.efficacy = 100
@@ -425,8 +425,8 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):

        for node_uuid, node in self.compute_model.get_all_compute_nodes(
        ).items():
            node_instances = (self.compute_model.mapping
                              .get_node_instances_from_id(node_uuid))
            node_instances = self.compute_model.mapping.get_node_instances(
                node)
            if node_instances:
                if node.state == element.ServiceState.ENABLED:
                    self.add_change_service_state(
@@ -130,7 +130,7 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
        disk_gb_used = 0
        if len(instances) > 0:
            for instance_id in instances:
                instance = self.compute_model.get_instance_from_id(instance_id)
                instance = self.compute_model.get_instance_by_uuid(instance_id)
                vcpus_used += cpu_capacity.get_capacity(instance)
                memory_mb_used += memory_capacity.get_capacity(instance)
                disk_gb_used += disk_capacity.get_capacity(instance)
@@ -147,7 +147,7 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
        hosts_need_release = []
        hosts_target = []
        for node_id in nodes:
            node = self.compute_model.get_node_from_id(
            node = self.compute_model.get_node_by_uuid(
                node_id)
            resource_id = node.uuid
@@ -180,7 +180,7 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
        for instance_id in instances_of_src:
            try:
                # select the first active instance to migrate
                instance = self.compute_model.get_instance_from_id(
                instance = self.compute_model.get_instance_by_uuid(
                    instance_id)
                if (instance.state !=
                        element.InstanceState.ACTIVE.value):
@@ -196,11 +196,11 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):

    def filter_dest_servers(self, hosts, instance_to_migrate):
        """Only return hosts with sufficient available resources"""
        cpu_capacity = self.compute_model.get_resource_from_id(
        cpu_capacity = self.compute_model.get_resource_by_uuid(
            element.ResourceType.cpu_cores)
        disk_capacity = self.compute_model.get_resource_from_id(
        disk_capacity = self.compute_model.get_resource_by_uuid(
            element.ResourceType.disk)
        memory_capacity = self.compute_model.get_resource_from_id(
        memory_capacity = self.compute_model.get_resource_by_uuid(
            element.ResourceType.memory)

        required_cores = cpu_capacity.get_capacity(instance_to_migrate)
@@ -144,7 +144,7 @@ class UniformAirflow(base.BaseStrategy):
        memory_mb_used = 0
        disk_gb_used = 0
        for instance_id in instances:
            instance = self.compute_model.get_instance_from_id(
            instance = self.compute_model.get_instance_by_uuid(
                instance_id)
            vcpus_used += cap_cores.get_capacity(instance)
            memory_mb_used += cap_mem.get_capacity(instance)
@@ -179,7 +179,7 @@ class UniformAirflow(base.BaseStrategy):
        for instance_id in source_instances:
            try:
                instance = (self.compute_model.
                            get_instance_from_id(instance_id))
                            get_instance_by_uuid(instance_id))
                instances_tobe_migrate.append(instance)
            except wexc.InstanceNotFound:
                LOG.error(_LE("Instance not found; error: %s"),
@@ -190,7 +190,7 @@ class UniformAirflow(base.BaseStrategy):
        for instance_id in source_instances:
            try:
                instance = (self.compute_model.
                            get_instance_from_id(instance_id))
                            get_instance_by_uuid(instance_id))
                if (instance.state !=
                        element.InstanceState.ACTIVE.value):
                    LOG.info(
@@ -209,11 +209,11 @@ class UniformAirflow(base.BaseStrategy):
    def filter_destination_hosts(self, hosts, instances_to_migrate):
        """Find instance and host with sufficient available resources"""

        cap_cores = self.compute_model.get_resource_from_id(
        cap_cores = self.compute_model.get_resource_by_uuid(
            element.ResourceType.cpu_cores)
        cap_disk = self.compute_model.get_resource_from_id(
        cap_disk = self.compute_model.get_resource_by_uuid(
            element.ResourceType.disk)
        cap_mem = self.compute_model.get_resource_from_id(
        cap_mem = self.compute_model.get_resource_by_uuid(
            element.ResourceType.memory)
        # large instance go first
        instances_to_migrate = sorted(
@@ -265,7 +265,7 @@ class UniformAirflow(base.BaseStrategy):
        overload_hosts = []
        nonoverload_hosts = []
        for node_id in nodes:
            node = self.compute_model.get_node_from_id(
            node = self.compute_model.get_node_by_uuid(
                node_id)
            resource_id = node.uuid
            airflow = self.ceilometer.statistic_aggregation(
@@ -162,7 +162,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
        :param model: model_root object
        :return: None
        """
        instance = model.get_instance_from_id(instance_uuid)
        instance = model.get_instance_by_uuid(instance_uuid)

        instance_state_str = self.get_state_str(instance.state)
        if instance_state_str != element.InstanceState.ACTIVE.value:
@@ -226,9 +226,9 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
        instance_cpu_util = self.ceilometer.statistic_aggregation(
            resource_id=instance_uuid, meter_name=cpu_util_metric,
            period=period, aggregate=aggr)
        instance_cpu_cores = model.get_resource_from_id(
        instance_cpu_cores = model.get_resource_by_uuid(
            element.ResourceType.cpu_cores).get_capacity(
                model.get_instance_from_id(instance_uuid))
                model.get_instance_by_uuid(instance_uuid))

        if instance_cpu_util:
            total_cpu_utilization = (
@@ -271,7 +271,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
        :param aggr: string
        :return: dict(cpu(number of cores used), ram(MB used), disk(B used))
        """
        node_instances = model.mapping.get_node_instances_from_id(
        node_instances = model.mapping.get_node_instances_by_uuid(
            node.uuid)
        node_ram_util = 0
        node_disk_util = 0
@@ -293,13 +293,13 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
        :param model: model_root object
        :return: dict(cpu(cores), ram(MB), disk(B))
        """
        node_cpu_capacity = model.get_resource_from_id(
        node_cpu_capacity = model.get_resource_by_uuid(
            element.ResourceType.cpu_cores).get_capacity(node)

        node_disk_capacity = model.get_resource_from_id(
        node_disk_capacity = model.get_resource_by_uuid(
            element.ResourceType.disk_capacity).get_capacity(node)

        node_ram_capacity = model.get_resource_from_id(
        node_ram_capacity = model.get_resource_by_uuid(
            element.ResourceType.memory).get_capacity(node)
        return dict(cpu=node_cpu_capacity, ram=node_ram_capacity,
                    disk=node_disk_capacity)
@@ -122,7 +122,7 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
        memory_mb_used = 0
        disk_gb_used = 0
        for instance_id in instances:
            instance = self.compute_model.get_instance_from_id(instance_id)
            instance = self.compute_model.get_instance_by_uuid(instance_id)
            vcpus_used += cap_cores.get_capacity(instance)
            memory_mb_used += cap_mem.get_capacity(instance)
            disk_gb_used += cap_disk.get_capacity(instance)
@@ -147,7 +147,7 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
        for inst_id in source_instances:
            try:
                # select the first active VM to migrate
                instance = self.compute_model.get_instance_from_id(
                instance = self.compute_model.get_instance_by_uuid(
                    inst_id)
                if (instance.state !=
                        element.InstanceState.ACTIVE.value):
@@ -164,7 +164,7 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
                    instance_id)
        if instance_id:
            return (source_node,
                    self.compute_model.get_instance_from_id(
                    self.compute_model.get_instance_by_uuid(
                        instance_id))
        else:
            LOG.info(_LI("VM not found from node: %s"),
@@ -174,11 +174,11 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
                             avg_workload, workload_cache):
        '''Only return hosts with sufficient available resources'''

        cap_cores = self.compute_model.get_resource_from_id(
        cap_cores = self.compute_model.get_resource_by_uuid(
            element.ResourceType.cpu_cores)
        cap_disk = self.compute_model.get_resource_from_id(
        cap_disk = self.compute_model.get_resource_by_uuid(
            element.ResourceType.disk)
        cap_mem = self.compute_model.get_resource_from_id(
        cap_mem = self.compute_model.get_resource_by_uuid(
            element.ResourceType.memory)

        required_cores = cap_cores.get_capacity(instance_to_migrate)
@@ -222,7 +222,7 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
        if not nodes:
            raise wexc.ClusterEmpty()
        # get cpu cores capacity of nodes and instances
        cap_cores = self.compute_model.get_resource_from_id(
        cap_cores = self.compute_model.get_resource_by_uuid(
            element.ResourceType.cpu_cores)
        overload_hosts = []
        nonoverload_hosts = []
@@ -232,12 +232,12 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
        # use workload_cache to store the workload of VMs for reuse purpose
        workload_cache = {}
        for node_id in nodes:
            node = self.compute_model.get_node_from_id(
            node = self.compute_model.get_node_by_uuid(
                node_id)
            instances = self.compute_model.mapping.get_node_instances(node)
            node_workload = 0.0
            for instance_id in instances:
                instance = self.compute_model.get_instance_from_id(instance_id)
                instance = self.compute_model.get_instance_by_uuid(instance_id)
                try:
                    cpu_util = self.ceilometer.statistic_aggregation(
                        resource_id=instance_id,
@@ -172,9 +172,9 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
        :return: dict
        """
        LOG.debug('get_instance_load started')
        instance_vcpus = self.compute_model.get_resource_from_id(
        instance_vcpus = self.compute_model.get_resource_by_uuid(
            element.ResourceType.cpu_cores).get_capacity(
                self.compute_model.get_instance_from_id(instance_uuid))
                self.compute_model.get_instance_by_uuid(instance_uuid))
        instance_load = {'uuid': instance_uuid, 'vcpus': instance_vcpus}
        for meter in self.metrics:
            avg_meter = self.ceilometer.statistic_aggregation(
@@ -193,9 +193,9 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
        normalized_hosts = deepcopy(hosts)
        for host in normalized_hosts:
            if 'memory.resident' in normalized_hosts[host]:
                h_memory = self.compute_model.get_resource_from_id(
                h_memory = self.compute_model.get_resource_by_uuid(
                    element.ResourceType.memory).get_capacity(
                        self.compute_model.get_node_from_id(host))
                        self.compute_model.get_node_by_uuid(host))
                normalized_hosts[host]['memory.resident'] /= float(h_memory)

        return normalized_hosts
@@ -205,9 +205,9 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
        hosts_load = {}
        for node_id in self.compute_model.get_all_compute_nodes():
            hosts_load[node_id] = {}
            host_vcpus = self.compute_model.get_resource_from_id(
            host_vcpus = self.compute_model.get_resource_by_uuid(
                element.ResourceType.cpu_cores).get_capacity(
                    self.compute_model.get_node_from_id(node_id))
                    self.compute_model.get_node_by_uuid(node_id))
            hosts_load[node_id]['vcpus'] = host_vcpus

            for metric in self.metrics:
@@ -304,10 +304,10 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
        nodes.remove(source_hp_id)
        node_list = yield_nodes(nodes)
        instances_id = self.compute_model.get_mapping(). \
            get_node_instances_from_id(source_hp_id)
            get_node_instances_by_uuid(source_hp_id)
        for instance_id in instances_id:
            min_sd_case = {'value': len(self.metrics)}
            instance = self.compute_model.get_instance_from_id(instance_id)
            instance = self.compute_model.get_instance_by_uuid(instance_id)
            if instance.state not in [element.InstanceState.ACTIVE.value,
                                      element.InstanceState.PAUSED.value]:
                continue
@@ -357,10 +357,10 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
            mig_destination_node.uuid)

    def migrate(self, instance_uuid, src_host, dst_host):
        mig_instance = self.compute_model.get_instance_from_id(instance_uuid)
        mig_source_node = self.compute_model.get_node_from_id(
        mig_instance = self.compute_model.get_instance_by_uuid(instance_uuid)
        mig_source_node = self.compute_model.get_node_by_uuid(
            src_host)
        mig_destination_node = self.compute_model.get_node_from_id(
        mig_destination_node = self.compute_model.get_node_by_uuid(
            dst_host)
        self.create_migration_instance(mig_instance, mig_source_node,
                                       mig_destination_node)
@@ -382,13 +382,13 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
        min_sd = 1
        balanced = False
        for instance_host in migration:
            dst_hp_disk = self.compute_model.get_resource_from_id(
            dst_hp_disk = self.compute_model.get_resource_by_uuid(
                element.ResourceType.disk).get_capacity(
                    self.compute_model.get_node_from_id(
                    self.compute_model.get_node_by_uuid(
                        instance_host['host']))
            instance_disk = self.compute_model.get_resource_from_id(
            instance_disk = self.compute_model.get_resource_by_uuid(
                element.ResourceType.disk).get_capacity(
                    self.compute_model.get_instance_from_id(
                    self.compute_model.get_instance_by_uuid(
                        instance_host['instance']))
            if instance_disk > dst_hp_disk:
                continue
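Every strategy now resolves model elements through the *_by_uuid accessors and the mapping attribute. The common call pattern, condensed from the hunks above:

    total_cores = 0
    cpu_capacity = self.compute_model.get_resource_by_uuid(
        element.ResourceType.cpu_cores)
    node = self.compute_model.get_node_by_uuid(node_uuid)
    for instance_uuid in self.compute_model.mapping.get_node_instances(node):
        instance = self.compute_model.get_instance_by_uuid(instance_uuid)
        total_cores += cpu_capacity.get_capacity(instance)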
@@ -0,0 +1,65 @@
{
    "event_type": "instance.update",
    "payload": {
        "nova_object.data": {
            "architecture": "x86_64",
            "audit_period": {
                "nova_object.data": {
                    "audit_period_beginning": "2012-10-01T00:00:00Z",
                    "audit_period_ending": "2012-10-29T13:42:11Z"},
                "nova_object.name": "AuditPeriodPayload",
                "nova_object.namespace": "nova",
                "nova_object.version": "1.0"
            },
            "availability_zone": null,
            "bandwidth": [],
            "created_at": "2012-10-29T13:42:11Z",
            "deleted_at": null,
            "display_name": "NEW INSTANCE 9966d6bd-a45c-4e1c-9d57-3054899a3ec7",
            "host": "Node_2",
            "host_name": "NEW_INSTANCE_9966d6bd-a45c-4e1c-9d57-3054899a3ec7",
            "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
            "kernel_id": "",
            "launched_at": null,
            "metadata": {},
            "node": "hostname_0",
            "old_display_name": null,
            "os_type": null,
            "progress": 0,
            "ramdisk_id": "",
            "reservation_id": "r-sd3ygfjj",
            "state": "paused",
            "task_state": "scheduling",
            "power_state": "pending",
            "ip_addresses": [],
            "state_update": {
                "nova_object.data": {
                    "old_task_state": null,
                    "new_task_state": null,
                    "old_state": "paused",
                    "state": "paused"},
                "nova_object.name": "InstanceStateUpdatePayload",
                "nova_object.namespace": "nova",
                "nova_object.version": "1.0"},
            "tenant_id": "6f70656e737461636b20342065766572",
            "terminated_at": null,
            "flavor": {
                "nova_object.name": "FlavorPayload",
                "nova_object.data": {
                    "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3",
                    "root_gb": 1,
                    "vcpus": 1,
                    "ephemeral_gb": 0,
                    "memory_mb": 512
                },
                "nova_object.version": "1.0",
                "nova_object.namespace": "nova"
            },
            "user_id": "fake",
            "uuid": "9966d6bd-a45c-4e1c-9d57-3054899a3ec7"},
        "nova_object.name": "InstanceUpdatePayload",
        "nova_object.namespace": "nova",
        "nova_object.version": "1.0"},
    "priority": "INFO",
    "publisher_id": "nova-compute:Node_2"
}
@@ -0,0 +1,52 @@
{
    "publisher_id": "compute:Node_2",
    "event_type": "compute.instance.update",
    "payload": {
        "access_ip_v4": null,
        "access_ip_v6": null,
        "architecture": null,
        "audit_period_beginning": "2016-08-17T13:00:00.000000",
        "audit_period_ending": "2016-08-17T13:56:05.262440",
        "availability_zone": "nova",
        "bandwidth": {},
        "cell_name": "",
        "created_at": "2016-08-17 13:53:23+00:00",
        "deleted_at": "",
        "disk_gb": 1,
        "display_name": "NEW INSTANCE 9966d6bd-a45c-4e1c-9d57-3054899a3ec7",
        "ephemeral_gb": 0,
        "host": "Node_2",
        "hostname": "NEW_INSTANCE_9966d6bd-a45c-4e1c-9d57-3054899a3ec7",
        "image_meta": {
            "base_image_ref": "205f96f5-91f9-42eb-9138-03fffcea2b97",
            "container_format": "bare",
            "disk_format": "qcow2",
            "min_disk": "1",
            "min_ram": "0"
        },
        "image_ref_url": "http://10.50.0.222:9292/images/205f96f5-91f9-42eb-9138-03fffcea2b97",
        "instance_flavor_id": "1",
        "instance_id": "9966d6bd-a45c-4e1c-9d57-3054899a3ec7",
        "instance_type": "m1.tiny",
        "instance_type_id": 2,
        "kernel_id": "",
        "launched_at": "2016-08-17T13:53:35.000000",
        "memory_mb": 512,
        "metadata": {},
        "new_task_state": null,
        "node": "hostname_0",
        "old_state": "paused",
        "old_task_state": null,
        "os_type": null,
        "progress": "",
        "ramdisk_id": "",
        "reservation_id": "r-0822ymml",
        "root_gb": 1,
        "state": "paused",
        "state_description": "paused",
        "tenant_id": "a4b4772d93c74d5e8b7c68cdd2a014e1",
        "terminated_at": "",
        "user_id": "ce64facc93354bbfa90f4f9f9a3e1e75",
        "vcpus": 1
    }
}
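The two fixtures above feed the tests below through the suite's load_message helper; the handlers are then driven the same way oslo.messaging would drive them:

    message = self.load_message('scenario3_notfound_instance-update.json')
    handler.info(
        ctxt=self.context,
        publisher_id=message['publisher_id'],
        event_type=message['event_type'],
        payload=message['payload'],
        metadata=self.FAKE_METADATA,
    )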
@@ -23,6 +23,7 @@ from oslo_serialization import jsonutils

from watcher.common import context
from watcher.common import exception
from watcher.common import nova_helper
from watcher.common import service as watcher_service
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
@@ -125,7 +126,7 @@ class TestNovaNotifications(NotificationTestCase):
        handler = novanotification.ServiceUpdated(self.fake_cdmc)

        node0_uuid = 'Node_0'
        node0 = compute_model.get_node_from_id(node0_uuid)
        node0 = compute_model.get_node_by_uuid(node0_uuid)

        message = self.load_message('scenario3_service-update.json')
@@ -151,7 +152,7 @@ class TestNovaNotifications(NotificationTestCase):
        handler = novanotification.InstanceUpdated(self.fake_cdmc)

        instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
        instance0 = compute_model.get_instance_from_id(instance0_uuid)
        instance0 = compute_model.get_instance_by_uuid(instance0_uuid)

        message = self.load_message('scenario3_instance-update.json')
@@ -167,34 +168,46 @@ class TestNovaNotifications(NotificationTestCase):

        self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)

    def test_nova_instance_update_notfound_creates(self):
    @mock.patch.object(nova_helper, "NovaHelper")
    def test_nova_instance_update_notfound_still_creates(
            self, m_nova_helper_cls):
        m_get_compute_node_by_hostname = mock.Mock(
            side_effect=lambda uuid: mock.Mock(
                name='m_get_compute_node_by_hostname',
                id=3,
                uuid=uuid,
                memory_mb=7777,
                vcpus=42,
                free_disk_gb=974,
                local_gb=1337))
        m_nova_helper_cls.return_value = mock.Mock(
            get_compute_node_by_hostname=m_get_compute_node_by_hostname,
            name='m_nova_helper')

        compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
        self.fake_cdmc.cluster_data_model = compute_model
        handler = novanotification.InstanceUpdated(self.fake_cdmc)

        instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
        instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7'

        message = self.load_message('scenario3_instance-update.json')
        message = self.load_message('scenario3_notfound_instance-update.json')

        with mock.patch.object(
            model_root.ModelRoot, 'get_instance_from_id'
        ) as m_get_instance_from_id:
            m_get_instance_from_id.side_effect = exception.InstanceNotFound(
                name='TEST')
            handler.info(
                ctxt=self.context,
                publisher_id=message['publisher_id'],
                event_type=message['event_type'],
                payload=message['payload'],
                metadata=self.FAKE_METADATA,
            )
        handler.info(
            ctxt=self.context,
            publisher_id=message['publisher_id'],
            event_type=message['event_type'],
            payload=message['payload'],
            metadata=self.FAKE_METADATA,
        )

        instance0 = compute_model.get_instance_from_id(instance0_uuid)
        cpu_capacity = compute_model.get_resource_from_id(
        instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
        cpu_capacity = compute_model.get_resource_by_uuid(
            element.ResourceType.cpu_cores)
        disk_capacity = compute_model.get_resource_from_id(
        disk = compute_model.get_resource_by_uuid(
            element.ResourceType.disk)
        memory_capacity = compute_model.get_resource_from_id(
        disk_capacity = compute_model.get_resource_by_uuid(
            element.ResourceType.disk_capacity)
        memory_capacity = compute_model.get_resource_by_uuid(
            element.ResourceType.memory)

        self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
@@ -202,6 +215,60 @@ class TestNovaNotifications(NotificationTestCase):
        self.assertEqual(1, disk_capacity.get_capacity(instance0))
        self.assertEqual(512, memory_capacity.get_capacity(instance0))

        m_get_compute_node_by_hostname.assert_called_once_with('Node_2')
        node_2 = compute_model.get_node_by_uuid('Node_2')
        self.assertEqual(7777, memory_capacity.get_capacity(node_2))
        self.assertEqual(42, cpu_capacity.get_capacity(node_2))
        self.assertEqual(974, disk.get_capacity(node_2))
        self.assertEqual(1337, disk_capacity.get_capacity(node_2))

    @mock.patch.object(nova_helper, "NovaHelper")
    def test_instance_update_node_notfound_set_unmapped(
            self, m_nova_helper_cls):
        m_get_compute_node_by_hostname = mock.Mock(
            side_effect=exception.ComputeNodeNotFound)
        m_nova_helper_cls.return_value = mock.Mock(
            get_compute_node_by_hostname=m_get_compute_node_by_hostname,
            name='m_nova_helper')

        compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
        self.fake_cdmc.cluster_data_model = compute_model
        handler = novanotification.InstanceUpdated(self.fake_cdmc)

        instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7'

        message = self.load_message(
            'scenario3_notfound_instance-update.json')

        handler.info(
            ctxt=self.context,
            publisher_id=message['publisher_id'],
            event_type=message['event_type'],
            payload=message['payload'],
            metadata=self.FAKE_METADATA,
        )

        instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
        cpu_capacity = compute_model.get_resource_by_uuid(
            element.ResourceType.cpu_cores)
        disk = compute_model.get_resource_by_uuid(
            element.ResourceType.disk)
        disk_capacity = compute_model.get_resource_by_uuid(
            element.ResourceType.disk_capacity)
        memory_capacity = compute_model.get_resource_by_uuid(
            element.ResourceType.memory)

        self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
        self.assertEqual(1, cpu_capacity.get_capacity(instance0))
        self.assertEqual(1, disk.get_capacity(instance0))
        self.assertEqual(1, disk_capacity.get_capacity(instance0))
        self.assertEqual(512, memory_capacity.get_capacity(instance0))

        m_get_compute_node_by_hostname.assert_any_call('Node_2')
        self.assertRaises(
            exception.ComputeNodeNotFound,
            compute_model.get_node_by_uuid, 'Node_2')

    def test_nova_instance_create(self):
        compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
        self.fake_cdmc.cluster_data_model = compute_model
@@ -211,7 +278,7 @@ class TestNovaNotifications(NotificationTestCase):

        self.assertRaises(
            exception.InstanceNotFound,
            compute_model.get_instance_from_id, instance0_uuid)
            compute_model.get_instance_by_uuid, instance0_uuid)

        message = self.load_message('scenario3_instance-create.json')
        handler.info(
@@ -222,12 +289,12 @@ class TestNovaNotifications(NotificationTestCase):
            metadata=self.FAKE_METADATA,
        )

        instance0 = compute_model.get_instance_from_id(instance0_uuid)
        cpu_capacity = compute_model.get_resource_from_id(
        instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
        cpu_capacity = compute_model.get_resource_by_uuid(
            element.ResourceType.cpu_cores)
        disk_capacity = compute_model.get_resource_from_id(
        disk_capacity = compute_model.get_resource_by_uuid(
            element.ResourceType.disk)
        memory_capacity = compute_model.get_resource_from_id(
        memory_capacity = compute_model.get_resource_by_uuid(
            element.ResourceType.memory)

        self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
@@ -243,7 +310,7 @@ class TestNovaNotifications(NotificationTestCase):
        instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'

        # Before
        self.assertTrue(compute_model.get_instance_from_id(instance0_uuid))
        self.assertTrue(compute_model.get_instance_by_uuid(instance0_uuid))
        for resource in compute_model.resource.values():
            self.assertIn(instance0_uuid, resource.mapping)
@@ -259,7 +326,7 @@ class TestNovaNotifications(NotificationTestCase):
        # After
        self.assertRaises(
            exception.InstanceNotFound,
            compute_model.get_instance_from_id, instance0_uuid)
            compute_model.get_instance_by_uuid, instance0_uuid)

        for resource in compute_model.resource.values():
            self.assertNotIn(instance0_uuid, resource.mapping)
@@ -282,7 +349,7 @@ class TestLegacyNovaNotifications(NotificationTestCase):
        instance0_uuid = 'c03c0bf9-f46e-4e4f-93f1-817568567ee2'
        self.assertRaises(
            exception.InstanceNotFound,
            compute_model.get_instance_from_id, instance0_uuid)
            compute_model.get_instance_by_uuid, instance0_uuid)

        message = self.load_message(
            'scenario3_legacy_instance-create-end.json')
@ -295,12 +362,12 @@ class TestLegacyNovaNotifications(NotificationTestCase):
|
||||
metadata=self.FAKE_METADATA,
|
||||
)
|
||||
|
||||
instance0 = compute_model.get_instance_from_id(instance0_uuid)
|
||||
cpu_capacity = compute_model.get_resource_from_id(
|
||||
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||
cpu_capacity = compute_model.get_resource_by_uuid(
|
||||
element.ResourceType.cpu_cores)
|
||||
disk_capacity = compute_model.get_resource_from_id(
|
||||
disk_capacity = compute_model.get_resource_by_uuid(
|
||||
element.ResourceType.disk)
|
||||
memory_capacity = compute_model.get_resource_from_id(
|
||||
memory_capacity = compute_model.get_resource_by_uuid(
|
||||
element.ResourceType.memory)
|
||||
|
||||
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
|
||||
@ -314,7 +381,7 @@ class TestLegacyNovaNotifications(NotificationTestCase):
|
||||
handler = novanotification.LegacyInstanceUpdated(self.fake_cdmc)
|
||||
|
||||
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
||||
instance0 = compute_model.get_instance_from_id(instance0_uuid)
|
||||
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||
|
||||
message = self.load_message('scenario3_legacy_instance-update.json')
|
||||
|
||||
@ -330,7 +397,7 @@ class TestLegacyNovaNotifications(NotificationTestCase):
|
||||
|
||||
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
|
||||
|
||||
def test_legacy_instance_update_notfound_creates(self):
|
||||
def test_legacy_instance_update_instance_notfound_creates(self):
|
||||
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
|
||||
self.fake_cdmc.cluster_data_model = compute_model
|
||||
handler = novanotification.LegacyInstanceUpdated(self.fake_cdmc)
|
||||
@ -340,9 +407,9 @@ class TestLegacyNovaNotifications(NotificationTestCase):
|
||||
message = self.load_message('scenario3_legacy_instance-update.json')
|
||||
|
||||
with mock.patch.object(
|
||||
model_root.ModelRoot, 'get_instance_from_id'
|
||||
) as m_get_instance_from_id:
|
||||
m_get_instance_from_id.side_effect = exception.InstanceNotFound(
|
||||
model_root.ModelRoot, 'get_instance_by_uuid'
|
||||
) as m_get_instance_by_uuid:
|
||||
m_get_instance_by_uuid.side_effect = exception.InstanceNotFound(
|
||||
name='TEST')
|
||||
handler.info(
|
||||
ctxt=self.context,
|
||||
@ -352,58 +419,121 @@ class TestLegacyNovaNotifications(NotificationTestCase):
|
||||
metadata=self.FAKE_METADATA,
|
||||
)
|
||||
|
||||
instance0 = compute_model.get_instance_from_id(instance0_uuid)
|
||||
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
|
||||
|
||||
def test_legacy_instance_update_node_notfound_stil_creates(self):
|
||||
@mock.patch.object(nova_helper, "NovaHelper")
|
||||
def test_legacy_instance_update_node_notfound_still_creates(
|
||||
self, m_nova_helper_cls):
|
||||
m_get_compute_node_by_hostname = mock.Mock(
|
||||
side_effect=lambda uuid: mock.Mock(
|
||||
name='m_get_compute_node_by_hostname',
|
||||
id=3,
|
||||
uuid=uuid,
|
||||
memory_mb=7777,
|
||||
vcpus=42,
|
||||
free_disk_gb=974,
|
||||
local_gb=1337))
|
||||
m_nova_helper_cls.return_value = mock.Mock(
|
||||
get_compute_node_by_hostname=m_get_compute_node_by_hostname,
|
||||
name='m_nova_helper')
|
||||
|
||||
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
|
||||
self.fake_cdmc.cluster_data_model = compute_model
|
||||
handler = novanotification.LegacyInstanceUpdated(self.fake_cdmc)
|
||||
|
||||
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
||||
instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7'
|
||||
|
||||
message = self.load_message('scenario3_legacy_instance-update.json')
|
||||
message = self.load_message(
|
||||
'scenario3_notfound_legacy_instance-update.json')
|
||||
|
||||
with mock.patch.object(
|
||||
model_root.ModelRoot, 'get_instance_from_id'
|
||||
) as m_get_instance_from_id:
|
||||
m_get_instance_from_id.side_effect = exception.InstanceNotFound(
|
||||
name='TEST')
|
||||
with mock.patch.object(
|
||||
model_root.ModelRoot, 'get_node_from_id'
|
||||
) as m_get_node_from_id:
|
||||
m_get_node_from_id.side_effect = exception.ComputeNodeNotFound(
|
||||
name='TEST')
|
||||
handler.info(
|
||||
ctxt=self.context,
|
||||
publisher_id=message['publisher_id'],
|
||||
event_type=message['event_type'],
|
||||
payload=message['payload'],
|
||||
metadata=self.FAKE_METADATA,
|
||||
)
|
||||
handler.info(
|
||||
ctxt=self.context,
|
||||
publisher_id=message['publisher_id'],
|
||||
event_type=message['event_type'],
|
||||
payload=message['payload'],
|
||||
metadata=self.FAKE_METADATA,
|
||||
)
|
||||
|
||||
instance0 = compute_model.get_instance_from_id(instance0_uuid)
|
||||
cpu_capacity = compute_model.get_resource_from_id(
|
||||
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||
cpu_capacity = compute_model.get_resource_by_uuid(
|
||||
element.ResourceType.cpu_cores)
|
||||
disk_capacity = compute_model.get_resource_from_id(
|
||||
disk = compute_model.get_resource_by_uuid(
|
||||
element.ResourceType.disk)
|
||||
memory_capacity = compute_model.get_resource_from_id(
|
||||
disk_capacity = compute_model.get_resource_by_uuid(
|
||||
element.ResourceType.disk_capacity)
|
||||
memory_capacity = compute_model.get_resource_by_uuid(
|
||||
element.ResourceType.memory)
|
||||
|
||||
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
|
||||
self.assertEqual(1, cpu_capacity.get_capacity(instance0))
|
||||
self.assertEqual(1, disk.get_capacity(instance0))
|
||||
self.assertEqual(1, disk_capacity.get_capacity(instance0))
|
||||
self.assertEqual(512, memory_capacity.get_capacity(instance0))
|
||||
|
||||
m_get_compute_node_by_hostname.assert_any_call('Node_2')
|
||||
node_2 = compute_model.get_node_by_uuid('Node_2')
|
||||
self.assertEqual(7777, memory_capacity.get_capacity(node_2))
|
||||
self.assertEqual(42, cpu_capacity.get_capacity(node_2))
|
||||
self.assertEqual(974, disk.get_capacity(node_2))
|
||||
self.assertEqual(1337, disk_capacity.get_capacity(node_2))
|
||||
|
||||
@mock.patch.object(nova_helper, "NovaHelper")
|
||||
def test_legacy_instance_update_node_notfound_set_unmapped(
|
||||
self, m_nova_helper_cls):
|
||||
m_get_compute_node_by_hostname = mock.Mock(
|
||||
side_effect=exception.ComputeNodeNotFound)
|
||||
m_nova_helper_cls.return_value = mock.Mock(
|
||||
get_compute_node_by_hostname=m_get_compute_node_by_hostname,
|
||||
name='m_nova_helper')
|
||||
|
||||
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
|
||||
self.fake_cdmc.cluster_data_model = compute_model
|
||||
handler = novanotification.LegacyInstanceUpdated(self.fake_cdmc)
|
||||
|
||||
instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7'
|
||||
|
||||
message = self.load_message(
|
||||
'scenario3_notfound_legacy_instance-update.json')
|
||||
|
||||
handler.info(
|
||||
ctxt=self.context,
|
||||
publisher_id=message['publisher_id'],
|
||||
event_type=message['event_type'],
|
||||
payload=message['payload'],
|
||||
metadata=self.FAKE_METADATA,
|
||||
)
|
||||
|
||||
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||
cpu_capacity = compute_model.get_resource_by_uuid(
|
||||
element.ResourceType.cpu_cores)
|
||||
disk = compute_model.get_resource_by_uuid(
|
||||
element.ResourceType.disk)
|
||||
disk_capacity = compute_model.get_resource_by_uuid(
|
||||
element.ResourceType.disk_capacity)
|
||||
memory_capacity = compute_model.get_resource_by_uuid(
|
||||
element.ResourceType.memory)
|
||||
|
||||
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state)
|
||||
self.assertEqual(1, cpu_capacity.get_capacity(instance0))
|
||||
self.assertEqual(1, disk.get_capacity(instance0))
|
||||
self.assertEqual(1, disk_capacity.get_capacity(instance0))
|
||||
self.assertEqual(512, memory_capacity.get_capacity(instance0))
|
||||
|
||||
m_get_compute_node_by_hostname.assert_any_call('Node_2')
|
||||
self.assertRaises(
|
||||
exception.ComputeNodeNotFound,
|
||||
compute_model.get_node_by_uuid, 'Node_2')
|
||||
|
||||
def test_legacy_live_migrated_end(self):
|
||||
compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes()
|
||||
self.fake_cdmc.cluster_data_model = compute_model
|
||||
handler = novanotification.LegacyLiveMigratedEnd(self.fake_cdmc)
|
||||
|
||||
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
||||
instance0 = compute_model.get_instance_from_id(instance0_uuid)
|
||||
instance0 = compute_model.get_instance_by_uuid(instance0_uuid)
|
||||
|
||||
node = compute_model.get_node_from_instance_id(instance0_uuid)
|
||||
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
||||
self.assertEqual('Node_0', node.uuid)
|
||||
|
||||
message = self.load_message(
|
||||
@ -415,7 +545,7 @@ class TestLegacyNovaNotifications(NotificationTestCase):
|
||||
payload=message['payload'],
|
||||
metadata=self.FAKE_METADATA,
|
||||
)
|
||||
node = compute_model.get_node_from_instance_id(instance0_uuid)
|
||||
node = compute_model.get_node_by_instance_uuid(instance0_uuid)
|
||||
self.assertEqual('Node_1', node.uuid)
|
||||
self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state)
|
||||
|
||||
@ -427,7 +557,7 @@ class TestLegacyNovaNotifications(NotificationTestCase):
|
||||
instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc'
|
||||
|
||||
# Before
|
||||
self.assertTrue(compute_model.get_instance_from_id(instance0_uuid))
|
||||
self.assertTrue(compute_model.get_instance_by_uuid(instance0_uuid))
|
||||
for resource in compute_model.resource.values():
|
||||
self.assertIn(instance0_uuid, resource.mapping)
|
||||
|
||||
@ -444,7 +574,7 @@ class TestLegacyNovaNotifications(NotificationTestCase):
|
||||
# After
|
||||
self.assertRaises(
|
||||
exception.InstanceNotFound,
|
||||
compute_model.get_instance_from_id, instance0_uuid)
|
||||
compute_model.get_instance_by_uuid, instance0_uuid)
|
||||
|
||||
for resource in compute_model.resource.values():
|
||||
self.assertNotIn(instance0_uuid, resource.mapping)
|
||||
|
@ -44,10 +44,10 @@ class TestMapping(base.TestCase):
|
||||
node = model.mapping.get_node_from_instance(instance)
|
||||
self.assertEqual('Node_0', node.uuid)
|
||||
|
||||
def test_get_node_from_instance_id(self):
|
||||
def test_get_node_by_instance_uuid(self):
|
||||
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
|
||||
|
||||
nodes = model.mapping.get_node_instances_from_id("BLABLABLA")
|
||||
nodes = model.mapping.get_node_instances_by_uuid("BLABLABLA")
|
||||
self.assertEqual(0, len(nodes))
|
||||
|
||||
def test_get_all_instances(self):
|
||||
@ -74,9 +74,9 @@ class TestMapping(base.TestCase):
|
||||
instances = model.get_all_instances()
|
||||
keys = list(instances.keys())
|
||||
instance0 = instances[keys[0]]
|
||||
node0 = model.mapping.get_node_from_instance_id(instance0.uuid)
|
||||
node0 = model.mapping.get_node_by_instance_uuid(instance0.uuid)
|
||||
instance1 = instances[keys[1]]
|
||||
node1 = model.mapping.get_node_from_instance_id(instance1.uuid)
|
||||
node1 = model.mapping.get_node_by_instance_uuid(instance1.uuid)
|
||||
|
||||
self.assertEqual(
|
||||
False,
|
||||
@ -91,26 +91,24 @@ class TestMapping(base.TestCase):
|
||||
True,
|
||||
model.migrate_instance(instance1, node0, node1))
|
||||
|
||||
def test_unmap_from_id_log_warning(self):
|
||||
def test_unmap_by_uuid_log_warning(self):
|
||||
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
|
||||
instances = model.get_all_instances()
|
||||
keys = list(instances.keys())
|
||||
instance0 = instances[keys[0]]
|
||||
id_ = "{0}".format(uuid.uuid4())
|
||||
node = element.ComputeNode()
|
||||
node.uuid = id_
|
||||
uuid_ = "{0}".format(uuid.uuid4())
|
||||
node = element.ComputeNode(id=1)
|
||||
node.uuid = uuid_
|
||||
|
||||
model.mapping.unmap_from_id(node.uuid, instance0.uuid)
|
||||
# self.assertEqual(len(model.mapping.get_node_instances_from_id(
|
||||
# node.uuid)), 1)
|
||||
model.mapping.unmap_by_uuid(node.uuid, instance0.uuid)
|
||||
|
||||
def test_unmap_from_id(self):
|
||||
def test_unmap_by_uuid(self):
|
||||
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
|
||||
instances = model.get_all_instances()
|
||||
keys = list(instances.keys())
|
||||
instance0 = instances[keys[0]]
|
||||
node0 = model.mapping.get_node_from_instance_id(instance0.uuid)
|
||||
node0 = model.mapping.get_node_by_instance_uuid(instance0.uuid)
|
||||
|
||||
model.mapping.unmap_from_id(node0.uuid, instance0.uuid)
|
||||
self.assertEqual(0, len(model.mapping.get_node_instances_from_id(
|
||||
model.mapping.unmap_by_uuid(node0.uuid, instance0.uuid)
|
||||
self.assertEqual(0, len(model.mapping.get_node_instances_by_uuid(
|
||||
node0.uuid)))
|
||||
|
@ -16,9 +16,8 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import uuid
|
||||
|
||||
from lxml import etree
|
||||
from oslo_utils import uuidutils
|
||||
import six
|
||||
|
||||
from watcher.common import exception
|
||||
@ -41,131 +40,136 @@ class TestModel(base.TestCase):
|
||||
|
||||
expected_struct_str = """
|
||||
<ModelRoot>
|
||||
<ComputeNode ResourceType.cpu_cores="40" ResourceType.disk="250"
|
||||
ResourceType.memory="132" hostname="hostname_0" human_id=""
|
||||
state="up" status="enabled" uuid="Node_0">
|
||||
<ComputeNode ResourceType.cpu_cores="40" ResourceType.disk="250"
|
||||
ResourceType.disk_capacity="250" ResourceType.memory="132"
|
||||
hostname="hostname_0" human_id="" id="0" state="up"
|
||||
status="enabled" uuid="Node_0">
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_0"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_1"/>
|
||||
</ComputeNode>
|
||||
<ComputeNode ResourceType.cpu_cores="40" ResourceType.disk="250"
|
||||
ResourceType.disk_capacity="250" ResourceType.memory="132"
|
||||
hostname="hostname_1" human_id="" id="1" state="up"
|
||||
status="enabled" uuid="Node_1">
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_2"/>
|
||||
</ComputeNode>
|
||||
<ComputeNode ResourceType.cpu_cores="40" ResourceType.disk="250"
|
||||
ResourceType.disk_capacity="250" ResourceType.memory="132"
|
||||
hostname="hostname_2" human_id="" id="2" state="up"
|
||||
status="enabled" uuid="Node_2">
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_3"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_4"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_5"/>
|
||||
</ComputeNode>
|
||||
<ComputeNode ResourceType.cpu_cores="40" ResourceType.disk="250"
|
||||
ResourceType.disk_capacity="250" ResourceType.memory="132"
|
||||
hostname="hostname_3" human_id="" id="3" state="up"
|
||||
status="enabled" uuid="Node_3">
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_6"/>
|
||||
</ComputeNode>
|
||||
<ComputeNode ResourceType.cpu_cores="40" ResourceType.disk="250"
|
||||
ResourceType.disk_capacity="250" ResourceType.memory="132"
|
||||
hostname="hostname_4" human_id="" id="4" state="up"
|
||||
status="enabled" uuid="Node_4">
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_7"/>
|
||||
</ComputeNode>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_0"/>
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_10"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_1"/>
|
||||
</ComputeNode>
|
||||
<ComputeNode ResourceType.cpu_cores="40" ResourceType.disk="250"
|
||||
ResourceType.memory="132" hostname="hostname_1" human_id=""
|
||||
state="up" status="enabled" uuid="Node_1">
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_11"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_2"/>
|
||||
</ComputeNode>
|
||||
<ComputeNode ResourceType.cpu_cores="40" ResourceType.disk="250"
|
||||
ResourceType.memory="132" hostname="hostname_2" human_id=""
|
||||
state="up" status="enabled" uuid="Node_2">
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_12"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_3"/>
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_13"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_4"/>
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_14"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_5"/>
|
||||
</ComputeNode>
|
||||
<ComputeNode ResourceType.cpu_cores="40" ResourceType.disk="250"
|
||||
ResourceType.memory="132" hostname="hostname_3" human_id=""
|
||||
state="up" status="enabled" uuid="Node_3">
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_15"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_6"/>
|
||||
</ComputeNode>
|
||||
<ComputeNode ResourceType.cpu_cores="40" ResourceType.disk="250"
|
||||
ResourceType.memory="132" hostname="hostname_4" human_id=""
|
||||
state="up" status="enabled" uuid="Node_4">
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_16"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_7"/>
|
||||
</ComputeNode>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_10"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_11"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_12"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_13"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_14"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_15"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_16"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_17"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_18"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_19"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_20"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_21"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_22"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_23"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_24"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_25"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_26"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_27"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_28"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_29"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_30"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_31"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_32"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_33"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_34"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_8"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.memory="2" hostname="" human_id=""
|
||||
state="active" uuid="INSTANCE_9"/>
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_17"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_18"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_19"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_20"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_21"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_22"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_23"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_24"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_25"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_26"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_27"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_28"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_29"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_30"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_31"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_32"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_33"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_34"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_8"/>
|
||||
<Instance ResourceType.cpu_cores="10" ResourceType.disk="20"
|
||||
ResourceType.disk_capacity="20" ResourceType.memory="2"
|
||||
hostname="" human_id="" state="active" uuid="INSTANCE_9"/>
|
||||
</ModelRoot>
|
||||
"""
|
||||
parser = etree.XMLParser(remove_blank_text=True)
|
||||
@ -184,87 +188,87 @@ class TestModel(base.TestCase):
|
||||
|
||||
def test_add_node(self):
|
||||
model = model_root.ModelRoot()
|
||||
id_ = "{0}".format(uuid.uuid4())
|
||||
node = element.ComputeNode()
|
||||
node.uuid = id_
|
||||
uuid_ = "{0}".format(uuidutils.generate_uuid())
|
||||
node = element.ComputeNode(id=1)
|
||||
node.uuid = uuid_
|
||||
model.add_node(node)
|
||||
self.assertEqual(node, model.get_node_from_id(id_))
|
||||
self.assertEqual(node, model.get_node_by_uuid(uuid_))
|
||||
|
||||
def test_delete_node(self):
|
||||
model = model_root.ModelRoot()
|
||||
id_ = "{0}".format(uuid.uuid4())
|
||||
node = element.ComputeNode()
|
||||
node.uuid = id_
|
||||
uuid_ = "{0}".format(uuidutils.generate_uuid())
|
||||
node = element.ComputeNode(id=1)
|
||||
node.uuid = uuid_
|
||||
model.add_node(node)
|
||||
self.assertEqual(node, model.get_node_from_id(id_))
|
||||
self.assertEqual(node, model.get_node_by_uuid(uuid_))
|
||||
model.remove_node(node)
|
||||
self.assertRaises(exception.ComputeNodeNotFound,
|
||||
model.get_node_from_id, id_)
|
||||
model.get_node_by_uuid, uuid_)
|
||||
|
||||
def test_get_all_compute_nodes(self):
|
||||
model = model_root.ModelRoot()
|
||||
for _ in range(10):
|
||||
id_ = "{0}".format(uuid.uuid4())
|
||||
node = element.ComputeNode()
|
||||
node.uuid = id_
|
||||
for id_ in range(10):
|
||||
uuid_ = "{0}".format(uuidutils.generate_uuid())
|
||||
node = element.ComputeNode(id_)
|
||||
node.uuid = uuid_
|
||||
model.add_node(node)
|
||||
all_nodes = model.get_all_compute_nodes()
|
||||
for id_ in all_nodes:
|
||||
node = model.get_node_from_id(id_)
|
||||
for uuid_ in all_nodes:
|
||||
node = model.get_node_by_uuid(uuid_)
|
||||
model.assert_node(node)
|
||||
|
||||
def test_set_get_state_nodes(self):
|
||||
model = model_root.ModelRoot()
|
||||
id_ = "{0}".format(uuid.uuid4())
|
||||
node = element.ComputeNode()
|
||||
node.uuid = id_
|
||||
uuid_ = "{0}".format(uuidutils.generate_uuid())
|
||||
node = element.ComputeNode(id=1)
|
||||
node.uuid = uuid_
|
||||
model.add_node(node)
|
||||
|
||||
self.assertIn(node.state, [el.value for el in element.ServiceState])
|
||||
|
||||
node = model.get_node_from_id(id_)
|
||||
node = model.get_node_by_uuid(uuid_)
|
||||
node.state = element.ServiceState.OFFLINE.value
|
||||
self.assertIn(node.state, [el.value for el in element.ServiceState])
|
||||
|
||||
def test_node_from_id_raise(self):
|
||||
def test_node_from_uuid_raise(self):
|
||||
model = model_root.ModelRoot()
|
||||
id_ = "{0}".format(uuid.uuid4())
|
||||
node = element.ComputeNode()
|
||||
node.uuid = id_
|
||||
uuid_ = "{0}".format(uuidutils.generate_uuid())
|
||||
node = element.ComputeNode(id=1)
|
||||
node.uuid = uuid_
|
||||
model.add_node(node)
|
||||
|
||||
id2 = "{0}".format(uuid.uuid4())
|
||||
uuid2 = "{0}".format(uuidutils.generate_uuid())
|
||||
self.assertRaises(exception.ComputeNodeNotFound,
|
||||
model.get_node_from_id, id2)
|
||||
model.get_node_by_uuid, uuid2)
|
||||
|
||||
def test_remove_node_raise(self):
|
||||
model = model_root.ModelRoot()
|
||||
id_ = "{0}".format(uuid.uuid4())
|
||||
node = element.ComputeNode()
|
||||
node.uuid = id_
|
||||
uuid_ = "{0}".format(uuidutils.generate_uuid())
|
||||
node = element.ComputeNode(id=1)
|
||||
node.uuid = uuid_
|
||||
model.add_node(node)
|
||||
|
||||
id2 = "{0}".format(uuid.uuid4())
|
||||
node2 = element.ComputeNode()
|
||||
node2.uuid = id2
|
||||
uuid2 = "{0}".format(uuidutils.generate_uuid())
|
||||
node2 = element.ComputeNode(id=2)
|
||||
node2.uuid = uuid2
|
||||
|
||||
self.assertRaises(exception.ComputeNodeNotFound,
|
||||
model.remove_node, node2)
|
||||
|
||||
def test_assert_node_raise(self):
|
||||
model = model_root.ModelRoot()
|
||||
id_ = "{0}".format(uuid.uuid4())
|
||||
node = element.ComputeNode()
|
||||
node.uuid = id_
|
||||
uuid_ = "{0}".format(uuidutils.generate_uuid())
|
||||
node = element.ComputeNode(id=1)
|
||||
node.uuid = uuid_
|
||||
model.add_node(node)
|
||||
self.assertRaises(exception.IllegalArgumentException,
|
||||
model.assert_node, "objet_qcq")
|
||||
|
||||
def test_instance_from_id_raise(self):
|
||||
def test_instance_from_uuid_raise(self):
|
||||
fake_cluster = faker_cluster_state.FakerModelCollector()
|
||||
model = fake_cluster.generate_scenario_1()
|
||||
self.assertRaises(exception.InstanceNotFound,
|
||||
model.get_instance_from_id, "valeur_qcq")
|
||||
model.get_instance_by_uuid, "valeur_qcq")
|
||||
|
||||
def test_assert_instance_raise(self):
|
||||
model = model_root.ModelRoot()
|
||||
|
@ -56,11 +56,11 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
current_state_cluster.create_resource(disk)
|
||||
current_state_cluster.create_resource(disk_capacity)
|
||||
|
||||
for i in range(0, count_node):
|
||||
node_uuid = "Node_{0}".format(i)
|
||||
node = element.ComputeNode()
|
||||
for id_ in range(0, count_node):
|
||||
node_uuid = "Node_{0}".format(id_)
|
||||
node = element.ComputeNode(id_)
|
||||
node.uuid = node_uuid
|
||||
node.hostname = "hostname_{0}".format(i)
|
||||
node.hostname = "hostname_{0}".format(id_)
|
||||
node.state = 'enabled'
|
||||
|
||||
mem.set_capacity(node, 64)
|
||||
@ -79,12 +79,12 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
current_state_cluster.add_instance(instance)
|
||||
|
||||
current_state_cluster.get_mapping().map(
|
||||
current_state_cluster.get_node_from_id("Node_0"),
|
||||
current_state_cluster.get_instance_from_id("INSTANCE_0"))
|
||||
current_state_cluster.get_node_by_uuid("Node_0"),
|
||||
current_state_cluster.get_instance_by_uuid("INSTANCE_0"))
|
||||
|
||||
current_state_cluster.get_mapping().map(
|
||||
current_state_cluster.get_node_from_id("Node_1"),
|
||||
current_state_cluster.get_instance_from_id("INSTANCE_1"))
|
||||
current_state_cluster.get_node_by_uuid("Node_1"),
|
||||
current_state_cluster.get_instance_by_uuid("INSTANCE_1"))
|
||||
|
||||
return current_state_cluster
|
||||
|
||||
@ -109,11 +109,11 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
current_state_cluster.create_resource(disk)
|
||||
current_state_cluster.create_resource(disk_capacity)
|
||||
|
||||
for i in range(0, count_node):
|
||||
node_uuid = "Node_{0}".format(i)
|
||||
node = element.ComputeNode()
|
||||
for id_ in range(0, count_node):
|
||||
node_uuid = "Node_{0}".format(id_)
|
||||
node = element.ComputeNode(id_)
|
||||
node.uuid = node_uuid
|
||||
node.hostname = "hostname_{0}".format(i)
|
||||
node.hostname = "hostname_{0}".format(id_)
|
||||
node.state = 'up'
|
||||
|
||||
mem.set_capacity(node, 64)
|
||||
@ -132,8 +132,8 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
current_state_cluster.add_instance(instance)
|
||||
|
||||
current_state_cluster.get_mapping().map(
|
||||
current_state_cluster.get_node_from_id("Node_0"),
|
||||
current_state_cluster.get_instance_from_id("INSTANCE_%s" % i))
|
||||
current_state_cluster.get_node_by_uuid("Node_0"),
|
||||
current_state_cluster.get_instance_by_uuid("INSTANCE_%s" % i))
|
||||
|
||||
return current_state_cluster
|
||||
|
||||
@ -158,11 +158,11 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
current_state_cluster.create_resource(disk)
|
||||
current_state_cluster.create_resource(disk_capacity)
|
||||
|
||||
for i in range(0, count_node):
|
||||
node_uuid = "Node_{0}".format(i)
|
||||
node = element.ComputeNode()
|
||||
for id_ in range(0, count_node):
|
||||
node_uuid = "Node_{0}".format(id_)
|
||||
node = element.ComputeNode(id_)
|
||||
node.uuid = node_uuid
|
||||
node.hostname = "hostname_{0}".format(i)
|
||||
node.hostname = "hostname_{0}".format(id_)
|
||||
node.state = 'up'
|
||||
|
||||
mem.set_capacity(node, 64)
|
||||
@ -177,12 +177,12 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
instance.state = element.InstanceState.ACTIVE.value
|
||||
mem.set_capacity(instance, 2)
|
||||
disk.set_capacity(instance, 20)
|
||||
num_cores.set_capacity(instance, 2 ** (i-6))
|
||||
num_cores.set_capacity(instance, 2 ** (i - 6))
|
||||
current_state_cluster.add_instance(instance)
|
||||
|
||||
current_state_cluster.get_mapping().map(
|
||||
current_state_cluster.get_node_from_id("Node_0"),
|
||||
current_state_cluster.get_instance_from_id("INSTANCE_%s" % i))
|
||||
current_state_cluster.get_node_by_uuid("Node_0"),
|
||||
current_state_cluster.get_instance_by_uuid("INSTANCE_%s" % i))
|
||||
|
||||
return current_state_cluster
|
||||
|
||||
@ -213,14 +213,14 @@ class FakeCeilometerMetrics(object):
|
||||
"""
|
||||
|
||||
id = '%s_%s' % (r_id.split('_')[0], r_id.split('_')[1])
|
||||
instances = self.model.get_mapping().get_node_instances_from_id(id)
|
||||
instances = self.model.get_mapping().get_node_instances_by_uuid(id)
|
||||
util_sum = 0.0
|
||||
node_cpu_cores = self.model.get_resource_from_id(
|
||||
element.ResourceType.cpu_cores).get_capacity_from_id(id)
|
||||
node_cpu_cores = self.model.get_resource_by_uuid(
|
||||
element.ResourceType.cpu_cores).get_capacity_by_uuid(id)
|
||||
for instance_uuid in instances:
|
||||
instance_cpu_cores = self.model.get_resource_from_id(
|
||||
instance_cpu_cores = self.model.get_resource_by_uuid(
|
||||
element.ResourceType.cpu_cores).\
|
||||
get_capacity(self.model.get_instance_from_id(instance_uuid))
|
||||
get_capacity(self.model.get_instance_by_uuid(instance_uuid))
|
||||
total_cpu_util = instance_cpu_cores * self.get_instance_cpu_util(
|
||||
instance_uuid)
|
||||
util_sum += total_cpu_util / 100.0
|
||||
|
@ -53,19 +53,22 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
# 2199.954 Mhz
|
||||
num_cores = element.Resource(element.ResourceType.cpu_cores)
|
||||
disk = element.Resource(element.ResourceType.disk)
|
||||
disk_capacity = element.Resource(element.ResourceType.disk_capacity)
|
||||
|
||||
current_state_cluster.create_resource(mem)
|
||||
current_state_cluster.create_resource(num_cores)
|
||||
current_state_cluster.create_resource(disk)
|
||||
current_state_cluster.create_resource(disk_capacity)
|
||||
|
||||
for i in range(0, node_count):
|
||||
node_uuid = "Node_{0}".format(i)
|
||||
node = element.ComputeNode()
|
||||
for id_ in range(0, node_count):
|
||||
node_uuid = "Node_{0}".format(id_)
|
||||
node = element.ComputeNode(id_)
|
||||
node.uuid = node_uuid
|
||||
node.hostname = "hostname_{0}".format(i)
|
||||
node.hostname = "hostname_{0}".format(id_)
|
||||
|
||||
mem.set_capacity(node, 132)
|
||||
disk.set_capacity(node, 250)
|
||||
disk_capacity.set_capacity(node, 250)
|
||||
num_cores.set_capacity(node, 40)
|
||||
current_state_cluster.add_node(node)
|
||||
|
||||
@ -75,48 +78,49 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
instance.uuid = instance_uuid
|
||||
mem.set_capacity(instance, 2)
|
||||
disk.set_capacity(instance, 20)
|
||||
disk_capacity.set_capacity(instance, 20)
|
||||
num_cores.set_capacity(instance, 10)
|
||||
instances.append(instance)
|
||||
current_state_cluster.add_instance(instance)
|
||||
|
||||
current_state_cluster.get_mapping().map(
|
||||
current_state_cluster.get_node_from_id("Node_0"),
|
||||
current_state_cluster.get_instance_from_id("INSTANCE_0"))
|
||||
current_state_cluster.get_node_by_uuid("Node_0"),
|
||||
current_state_cluster.get_instance_by_uuid("INSTANCE_0"))
|
||||
|
||||
current_state_cluster.get_mapping().map(
|
||||
current_state_cluster.get_node_from_id("Node_0"),
|
||||
current_state_cluster.get_instance_from_id("INSTANCE_1"))
|
||||
current_state_cluster.get_node_by_uuid("Node_0"),
|
||||
current_state_cluster.get_instance_by_uuid("INSTANCE_1"))
|
||||
|
||||
current_state_cluster.get_mapping().map(
|
||||
current_state_cluster.get_node_from_id("Node_1"),
|
||||
current_state_cluster.get_instance_from_id("INSTANCE_2"))
|
||||
current_state_cluster.get_node_by_uuid("Node_1"),
|
||||
current_state_cluster.get_instance_by_uuid("INSTANCE_2"))
|
||||
|
||||
current_state_cluster.get_mapping().map(
|
||||
current_state_cluster.get_node_from_id("Node_2"),
|
||||
current_state_cluster.get_instance_from_id("INSTANCE_3"))
|
||||
current_state_cluster.get_node_by_uuid("Node_2"),
|
||||
current_state_cluster.get_instance_by_uuid("INSTANCE_3"))
|
||||
|
||||
current_state_cluster.get_mapping().map(
|
||||
current_state_cluster.get_node_from_id("Node_2"),
|
||||
current_state_cluster.get_instance_from_id("INSTANCE_4"))
|
||||
current_state_cluster.get_node_by_uuid("Node_2"),
|
||||
current_state_cluster.get_instance_by_uuid("INSTANCE_4"))
|
||||
|
||||
current_state_cluster.get_mapping().map(
|
||||
current_state_cluster.get_node_from_id("Node_2"),
|
||||
current_state_cluster.get_instance_from_id("INSTANCE_5"))
|
||||
current_state_cluster.get_node_by_uuid("Node_2"),
|
||||
current_state_cluster.get_instance_by_uuid("INSTANCE_5"))
|
||||
|
||||
current_state_cluster.get_mapping().map(
|
||||
current_state_cluster.get_node_from_id("Node_3"),
|
||||
current_state_cluster.get_instance_from_id("INSTANCE_6"))
|
||||
current_state_cluster.get_node_by_uuid("Node_3"),
|
||||
current_state_cluster.get_instance_by_uuid("INSTANCE_6"))
|
||||
|
||||
current_state_cluster.get_mapping().map(
|
||||
current_state_cluster.get_node_from_id("Node_4"),
|
||||
current_state_cluster.get_instance_from_id("INSTANCE_7"))
|
||||
current_state_cluster.get_node_by_uuid("Node_4"),
|
||||
current_state_cluster.get_instance_by_uuid("INSTANCE_7"))
|
||||
|
||||
return current_state_cluster
|
||||
|
||||
def map(self, model, h_id, instance_id):
|
||||
model.get_mapping().map(
|
||||
model.get_node_from_id(h_id),
|
||||
model.get_instance_from_id(instance_id))
|
||||
model.get_node_by_uuid(h_id),
|
||||
model.get_instance_by_uuid(instance_id))
|
||||
|
||||
def generate_scenario_3_with_2_nodes(self):
|
||||
instances = []
|
||||
@ -130,19 +134,22 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
# 2199.954 Mhz
|
||||
num_cores = element.Resource(element.ResourceType.cpu_cores)
|
||||
disk = element.Resource(element.ResourceType.disk)
|
||||
disk_capacity = element.Resource(element.ResourceType.disk_capacity)
|
||||
|
||||
root.create_resource(mem)
|
||||
root.create_resource(num_cores)
|
||||
root.create_resource(disk)
|
||||
root.create_resource(disk_capacity)
|
||||
|
||||
for i in range(0, node_count):
|
||||
node_uuid = "Node_{0}".format(i)
|
||||
node = element.ComputeNode()
|
||||
for id_ in range(0, node_count):
|
||||
node_uuid = "Node_{0}".format(id_)
|
||||
node = element.ComputeNode(id_)
|
||||
node.uuid = node_uuid
|
||||
node.hostname = "hostname_{0}".format(i)
|
||||
node.hostname = "hostname_{0}".format(id_)
|
||||
|
||||
mem.set_capacity(node, 132)
|
||||
disk.set_capacity(node, 250)
|
||||
disk_capacity.set_capacity(node, 250)
|
||||
num_cores.set_capacity(node, 40)
|
||||
root.add_node(node)
|
||||
|
||||
@ -150,6 +157,7 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
instance1.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
|
||||
mem.set_capacity(instance1, 2)
|
||||
disk.set_capacity(instance1, 20)
|
||||
disk_capacity.set_capacity(instance1, 20)
|
||||
num_cores.set_capacity(instance1, 10)
|
||||
instances.append(instance1)
|
||||
root.add_instance(instance1)
|
||||
@ -158,15 +166,16 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
instance2.uuid = "a4cab39b-9828-413a-bf88-f76921bf1517"
|
||||
mem.set_capacity(instance2, 2)
|
||||
disk.set_capacity(instance2, 20)
|
||||
disk_capacity.set_capacity(instance2, 20)
|
||||
num_cores.set_capacity(instance2, 10)
|
||||
instances.append(instance2)
|
||||
root.add_instance(instance2)
|
||||
|
||||
root.get_mapping().map(root.get_node_from_id("Node_0"),
|
||||
root.get_instance_from_id(str(instance1.uuid)))
|
||||
root.get_mapping().map(root.get_node_by_uuid("Node_0"),
|
||||
root.get_instance_by_uuid(str(instance1.uuid)))
|
||||
|
||||
root.get_mapping().map(root.get_node_from_id("Node_1"),
|
||||
root.get_instance_from_id(str(instance2.uuid)))
|
||||
root.get_mapping().map(root.get_node_by_uuid("Node_1"),
|
||||
root.get_instance_by_uuid(str(instance2.uuid)))
|
||||
|
||||
return root
|
||||
|
||||
@ -180,19 +189,22 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
# 2199.954 Mhz
|
||||
num_cores = element.Resource(element.ResourceType.cpu_cores)
|
||||
disk = element.Resource(element.ResourceType.disk)
|
||||
disk_capacity = element.Resource(element.ResourceType.disk_capacity)
|
||||
|
||||
current_state_cluster.create_resource(mem)
|
||||
current_state_cluster.create_resource(num_cores)
|
||||
current_state_cluster.create_resource(disk)
|
||||
current_state_cluster.create_resource(disk_capacity)
|
||||
|
||||
for i in range(0, node_count):
|
||||
node_uuid = "Node_{0}".format(i)
|
||||
node = element.ComputeNode()
|
||||
for id_ in range(0, node_count):
|
||||
node_uuid = "Node_{0}".format(id_)
|
||||
node = element.ComputeNode(id_)
|
||||
node.uuid = node_uuid
|
||||
node.hostname = "hostname_{0}".format(i)
|
||||
node.hostname = "hostname_{0}".format(id_)
|
||||
|
||||
mem.set_capacity(node, 1)
|
||||
disk.set_capacity(node, 1)
|
||||
disk_capacity.set_capacity(node, 1)
|
||||
num_cores.set_capacity(node, 1)
|
||||
current_state_cluster.add_node(node)
|
||||
|
||||
@ -211,19 +223,22 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
# 2199.954 Mhz
|
||||
num_cores = element.Resource(element.ResourceType.cpu_cores)
|
||||
disk = element.Resource(element.ResourceType.disk)
|
||||
disk_capacity = element.Resource(element.ResourceType.disk_capacity)
|
||||
|
||||
current_state_cluster.create_resource(mem)
|
||||
current_state_cluster.create_resource(num_cores)
|
||||
current_state_cluster.create_resource(disk)
|
||||
current_state_cluster.create_resource(disk_capacity)
|
||||
|
||||
for i in range(0, node_count):
|
||||
node_uuid = "Node_{0}".format(i)
|
||||
node = element.ComputeNode()
|
||||
for id_ in range(0, node_count):
|
||||
node_uuid = "Node_{0}".format(id_)
|
||||
node = element.ComputeNode(id_)
|
||||
node.uuid = node_uuid
|
||||
node.hostname = "hostname_{0}".format(i)
|
||||
node.hostname = "hostname_{0}".format(id_)
|
||||
|
||||
mem.set_capacity(node, 4)
|
||||
disk.set_capacity(node, 4)
|
||||
disk_capacity.set_capacity(node, 4)
|
||||
num_cores.set_capacity(node, 4)
|
||||
current_state_cluster.add_node(node)
|
||||
|
||||
@ -233,13 +248,14 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
instance.uuid = instance_uuid
|
||||
mem.set_capacity(instance, 2)
|
||||
disk.set_capacity(instance, 0)
|
||||
disk_capacity.set_capacity(instance, 0)
|
||||
num_cores.set_capacity(instance, 4)
|
||||
instances.append(instance)
|
||||
current_state_cluster.add_instance(instance)
|
||||
|
||||
current_state_cluster.get_mapping().map(
|
||||
current_state_cluster.get_node_from_id("Node_0"),
|
||||
current_state_cluster.get_instance_from_id("INSTANCE_0"))
|
||||
current_state_cluster.get_node_by_uuid("Node_0"),
|
||||
current_state_cluster.get_instance_by_uuid("INSTANCE_0"))
|
||||
|
||||
return current_state_cluster
|
||||
|
||||
@ -254,19 +270,22 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
# 2199.954 Mhz
|
||||
num_cores = element.Resource(element.ResourceType.cpu_cores)
|
||||
disk = element.Resource(element.ResourceType.disk)
|
||||
disk_capacity = element.Resource(element.ResourceType.disk_capacity)
|
||||
|
||||
root.create_resource(mem)
|
||||
root.create_resource(num_cores)
|
||||
root.create_resource(disk)
|
||||
root.create_resource(disk_capacity)
|
||||
|
||||
for i in range(0, node_count):
|
||||
node_uuid = "Node_{0}".format(i)
|
||||
node = element.ComputeNode()
|
||||
for id_ in range(0, node_count):
|
||||
node_uuid = "Node_{0}".format(id_)
|
||||
node = element.ComputeNode(id_)
|
||||
node.uuid = node_uuid
|
||||
node.hostname = "hostname_{0}".format(i)
|
||||
node.hostname = "hostname_{0}".format(id_)
|
||||
|
||||
mem.set_capacity(node, 132)
|
||||
disk.set_capacity(node, 250)
|
||||
disk_capacity.set_capacity(node, 250)
|
||||
num_cores.set_capacity(node, 40)
|
||||
root.add_node(node)
|
||||
|
||||
@ -274,6 +293,7 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
instance1.uuid = "INSTANCE_1"
|
||||
mem.set_capacity(instance1, 2)
|
||||
disk.set_capacity(instance1, 20)
|
||||
disk_capacity.set_capacity(instance1, 20)
|
||||
num_cores.set_capacity(instance1, 10)
|
||||
instances.append(instance1)
|
||||
root.add_instance(instance1)
|
||||
@ -282,6 +302,7 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
instance11.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
|
||||
mem.set_capacity(instance11, 2)
|
||||
disk.set_capacity(instance11, 20)
|
||||
disk_capacity.set_capacity(instance11, 20)
|
||||
num_cores.set_capacity(instance11, 10)
|
||||
instances.append(instance11)
|
||||
root.add_instance(instance11)
|
||||
@ -290,6 +311,7 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
instance2.uuid = "INSTANCE_3"
|
||||
mem.set_capacity(instance2, 2)
|
||||
disk.set_capacity(instance2, 20)
|
||||
disk_capacity.set_capacity(instance2, 20)
|
||||
num_cores.set_capacity(instance2, 10)
|
||||
instances.append(instance2)
|
||||
root.add_instance(instance2)
|
||||
@ -298,19 +320,20 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
instance21.uuid = "INSTANCE_4"
|
||||
mem.set_capacity(instance21, 2)
|
||||
disk.set_capacity(instance21, 20)
|
||||
disk_capacity.set_capacity(instance21, 20)
|
||||
num_cores.set_capacity(instance21, 10)
|
||||
instances.append(instance21)
|
||||
root.add_instance(instance21)
|
||||
|
||||
root.get_mapping().map(root.get_node_from_id("Node_0"),
|
||||
root.get_instance_from_id(str(instance1.uuid)))
|
||||
root.get_mapping().map(root.get_node_from_id("Node_0"),
|
||||
root.get_instance_from_id(str(instance11.uuid)))
|
||||
root.get_mapping().map(root.get_node_by_uuid("Node_0"),
|
||||
root.get_instance_by_uuid(str(instance1.uuid)))
|
||||
root.get_mapping().map(root.get_node_by_uuid("Node_0"),
|
||||
root.get_instance_by_uuid(str(instance11.uuid)))
|
||||
|
||||
root.get_mapping().map(root.get_node_from_id("Node_1"),
|
||||
root.get_instance_from_id(str(instance2.uuid)))
|
||||
root.get_mapping().map(root.get_node_from_id("Node_1"),
|
||||
root.get_instance_from_id(str(instance21.uuid)))
|
||||
root.get_mapping().map(root.get_node_by_uuid("Node_1"),
|
||||
root.get_instance_by_uuid(str(instance2.uuid)))
|
||||
root.get_mapping().map(root.get_node_by_uuid("Node_1"),
|
||||
root.get_instance_by_uuid(str(instance21.uuid)))
|
||||
return root
|
||||
|
||||
def generate_scenario_7_with_2_nodes(self):
|
||||
@ -324,19 +347,22 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
# 2199.954 Mhz
|
||||
num_cores = element.Resource(element.ResourceType.cpu_cores)
|
||||
disk = element.Resource(element.ResourceType.disk)
|
||||
disk_capacity = element.Resource(element.ResourceType.disk_capacity)
|
||||
|
||||
root.create_resource(mem)
|
||||
root.create_resource(num_cores)
|
||||
root.create_resource(disk)
|
||||
root.create_resource(disk_capacity)
|
||||
|
||||
for i in range(0, count_node):
|
||||
node_uuid = "Node_{0}".format(i)
|
||||
node = element.ComputeNode()
|
||||
for id_ in range(0, count_node):
|
||||
node_uuid = "Node_{0}".format(id_)
|
||||
node = element.ComputeNode(id_)
|
||||
node.uuid = node_uuid
|
||||
node.hostname = "hostname_{0}".format(i)
|
||||
node.hostname = "hostname_{0}".format(id_)
|
||||
|
||||
mem.set_capacity(node, 132)
|
||||
disk.set_capacity(node, 250)
|
||||
disk_capacity.set_capacity(node, 250)
|
||||
num_cores.set_capacity(node, 50)
|
||||
root.add_node(node)
|
||||
|
||||
@ -344,6 +370,7 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
instance1.uuid = "cae81432-1631-4d4e-b29c-6f3acdcde906"
|
||||
mem.set_capacity(instance1, 2)
|
||||
disk.set_capacity(instance1, 20)
|
||||
disk_capacity.set_capacity(instance1, 20)
|
||||
num_cores.set_capacity(instance1, 15)
|
||||
instances.append(instance1)
|
||||
root.add_instance(instance1)
|
||||
@ -352,6 +379,7 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
instance11.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
|
||||
mem.set_capacity(instance11, 2)
|
||||
disk.set_capacity(instance11, 20)
|
||||
disk_capacity.set_capacity(instance11, 20)
|
||||
num_cores.set_capacity(instance11, 10)
|
||||
instances.append(instance11)
|
||||
root.add_instance(instance11)
|
||||
@ -360,6 +388,7 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
instance2.uuid = "INSTANCE_3"
|
||||
mem.set_capacity(instance2, 2)
|
||||
disk.set_capacity(instance2, 20)
|
||||
disk_capacity.set_capacity(instance2, 20)
|
||||
num_cores.set_capacity(instance2, 10)
|
||||
instances.append(instance2)
|
||||
root.add_instance(instance2)
|
||||
@ -368,17 +397,18 @@ class FakerModelCollector(base.BaseClusterDataModelCollector):
|
||||
instance21.uuid = "INSTANCE_4"
|
||||
mem.set_capacity(instance21, 2)
|
||||
disk.set_capacity(instance21, 20)
|
||||
disk_capacity.set_capacity(instance21, 20)
|
||||
num_cores.set_capacity(instance21, 10)
|
||||
instances.append(instance21)
|
||||
root.add_instance(instance21)
|
||||
|
||||
root.get_mapping().map(root.get_node_from_id("Node_0"),
|
||||
root.get_instance_from_id(str(instance1.uuid)))
|
||||
root.get_mapping().map(root.get_node_from_id("Node_0"),
|
||||
root.get_instance_from_id(str(instance11.uuid)))
|
||||
root.get_mapping().map(root.get_node_by_uuid("Node_0"),
|
||||
root.get_instance_by_uuid(str(instance1.uuid)))
|
||||
root.get_mapping().map(root.get_node_by_uuid("Node_0"),
|
||||
root.get_instance_by_uuid(str(instance11.uuid)))
|
||||
|
||||
root.get_mapping().map(root.get_node_from_id("Node_1"),
|
||||
root.get_instance_from_id(str(instance2.uuid)))
|
||||
root.get_mapping().map(root.get_node_from_id("Node_1"),
|
||||
root.get_instance_from_id(str(instance21.uuid)))
|
||||
root.get_mapping().map(root.get_node_by_uuid("Node_1"),
|
||||
root.get_instance_by_uuid(str(instance2.uuid)))
|
||||
root.get_mapping().map(root.get_node_by_uuid("Node_1"),
|
||||
root.get_instance_by_uuid(str(instance21.uuid)))
|
||||
return root
|
||||
|
@ -20,7 +20,9 @@ import collections
|
||||
import mock
|
||||
|
||||
from watcher.applier.loading import default
|
||||
from watcher.common import clients
|
||||
from watcher.common import exception
|
||||
from watcher.decision_engine.model.collector import nova
|
||||
from watcher.decision_engine.model import model_root
|
||||
from watcher.decision_engine.strategy import strategies
|
||||
from watcher.tests import base
|
||||
@ -39,9 +41,13 @@ class TestBasicConsolidation(base.TestCase):
|
||||
# fake cluster
|
||||
self.fake_cluster = faker_cluster_state.FakerModelCollector()
|
||||
|
||||
p_osc = mock.patch.object(
|
||||
clients, "OpenStackClients")
|
||||
self.m_osc = p_osc.start()
|
||||
self.addCleanup(p_osc.stop)
|
||||
|
||||
p_model = mock.patch.object(
|
||||
strategies.BasicConsolidation, "compute_model",
|
||||
new_callable=mock.PropertyMock)
|
||||
nova.NovaClusterDataModelCollector, "execute")
|
||||
self.m_model = p_model.start()
|
||||
self.addCleanup(p_model.stop)
|
||||
|
||||
@ -67,39 +73,39 @@ class TestBasicConsolidation(base.TestCase):
        self.m_model.return_value = model
        node_1_score = 0.023333333333333317
        self.assertEqual(node_1_score, self.strategy.calculate_score_node(
            model.get_node_from_id("Node_1")))
            model.get_node_by_uuid("Node_1")))
        node_2_score = 0.26666666666666666
        self.assertEqual(node_2_score, self.strategy.calculate_score_node(
            model.get_node_from_id("Node_2")))
            model.get_node_by_uuid("Node_2")))
        node_0_score = 0.023333333333333317
        self.assertEqual(node_0_score, self.strategy.calculate_score_node(
            model.get_node_from_id("Node_0")))
            model.get_node_by_uuid("Node_0")))

    def test_basic_consolidation_score_instance(self):
        model = self.fake_cluster.generate_scenario_1()
        self.m_model.return_value = model
        instance_0 = model.get_instance_from_id("INSTANCE_0")
        instance_0 = model.get_instance_by_uuid("INSTANCE_0")
        instance_0_score = 0.023333333333333317
        self.assertEqual(
            instance_0_score,
            self.strategy.calculate_score_instance(instance_0))

        instance_1 = model.get_instance_from_id("INSTANCE_1")
        instance_1 = model.get_instance_by_uuid("INSTANCE_1")
        instance_1_score = 0.023333333333333317
        self.assertEqual(
            instance_1_score,
            self.strategy.calculate_score_instance(instance_1))
        instance_2 = model.get_instance_from_id("INSTANCE_2")
        instance_2 = model.get_instance_by_uuid("INSTANCE_2")
        instance_2_score = 0.033333333333333326
        self.assertEqual(
            instance_2_score,
            self.strategy.calculate_score_instance(instance_2))
        instance_6 = model.get_instance_from_id("INSTANCE_6")
        instance_6 = model.get_instance_by_uuid("INSTANCE_6")
        instance_6_score = 0.02666666666666669
        self.assertEqual(
            instance_6_score,
            self.strategy.calculate_score_instance(instance_6))
        instance_7 = model.get_instance_from_id("INSTANCE_7")
        instance_7 = model.get_instance_by_uuid("INSTANCE_7")
        instance_7_score = 0.013333333333333345
        self.assertEqual(
            instance_7_score,
@ -108,7 +114,7 @@ class TestBasicConsolidation(base.TestCase):
    def test_basic_consolidation_score_instance_disk(self):
        model = self.fake_cluster.generate_scenario_5_with_instance_disk_0()
        self.m_model.return_value = model
        instance_0 = model.get_instance_from_id("INSTANCE_0")
        instance_0 = model.get_instance_by_uuid("INSTANCE_0")
        instance_0_score = 0.023333333333333355
        self.assertEqual(
            instance_0_score,
@ -117,7 +123,7 @@ class TestBasicConsolidation(base.TestCase):
    def test_basic_consolidation_weight(self):
        model = self.fake_cluster.generate_scenario_1()
        self.m_model.return_value = model
        instance_0 = model.get_instance_from_id("INSTANCE_0")
        instance_0 = model.get_instance_by_uuid("INSTANCE_0")
        cores = 16
        # 80 GB
        disk = 80
@ -162,6 +168,14 @@ class TestBasicConsolidation(base.TestCase):
        self.assertFalse(self.strategy.check_threshold(
            node0, 1000, 1000, 1000))

    def test_basic_consolidation_works_on_model_copy(self):
        model = self.fake_cluster.generate_scenario_3_with_2_nodes()
        self.m_model.return_value = model

        self.assertEqual(
            model.to_string(), self.strategy.compute_model.to_string())
        self.assertIsNot(model, self.strategy.compute_model)

    def test_basic_consolidation_migration(self):
        model = self.fake_cluster.generate_scenario_3_with_2_nodes()
        self.m_model.return_value = model
@ -65,10 +65,10 @@ class TestOutletTempControl(base.TestCase):
    def test_calc_used_res(self):
        model = self.fake_cluster.generate_scenario_3_with_2_nodes()
        self.m_model.return_value = model
        node = model.get_node_from_id('Node_0')
        cap_cores = model.get_resource_from_id(element.ResourceType.cpu_cores)
        cap_mem = model.get_resource_from_id(element.ResourceType.memory)
        cap_disk = model.get_resource_from_id(element.ResourceType.disk)
        node = model.get_node_by_uuid('Node_0')
        cap_cores = model.get_resource_by_uuid(element.ResourceType.cpu_cores)
        cap_mem = model.get_resource_by_uuid(element.ResourceType.memory)
        cap_disk = model.get_resource_by_uuid(element.ResourceType.disk)
        cores_used, mem_used, disk_used = self.strategy.calc_used_res(
            node, cap_cores, cap_mem, cap_disk)
@ -70,10 +70,10 @@ class TestUniformAirflow(base.TestCase):
    def test_calc_used_res(self):
        model = self.fake_cluster.generate_scenario_7_with_2_nodes()
        self.m_model.return_value = model
        node = model.get_node_from_id('Node_0')
        cap_cores = model.get_resource_from_id(element.ResourceType.cpu_cores)
        cap_mem = model.get_resource_from_id(element.ResourceType.memory)
        cap_disk = model.get_resource_from_id(element.ResourceType.disk)
        node = model.get_node_by_uuid('Node_0')
        cap_cores = model.get_resource_by_uuid(element.ResourceType.cpu_cores)
        cap_mem = model.get_resource_by_uuid(element.ResourceType.memory)
        cap_disk = model.get_resource_by_uuid(element.ResourceType.disk)
        cores_used, mem_used, disk_used = self.\
            strategy.calculate_used_resource(
                node, cap_cores, cap_mem, cap_disk)
@ -69,7 +69,7 @@ class TestVMWorkloadConsolidation(base.TestCase):
        model = self.fake_cluster.generate_scenario_1()
        self.m_model.return_value = model
        self.fake_metrics.model = model
        instance_0 = model.get_instance_from_id("INSTANCE_0")
        instance_0 = model.get_instance_by_uuid("INSTANCE_0")
        instance_util = dict(cpu=1.0, ram=1, disk=10)
        self.assertEqual(
            instance_util,
@ -79,7 +79,7 @@ class TestVMWorkloadConsolidation(base.TestCase):
        model = self.fake_cluster.generate_scenario_1()
        self.m_model.return_value = model
        self.fake_metrics.model = model
        node_0 = model.get_node_from_id("Node_0")
        node_0 = model.get_node_by_uuid("Node_0")
        node_util = dict(cpu=1.0, ram=1, disk=10)
        self.assertEqual(
            node_util,
@ -89,7 +89,7 @@ class TestVMWorkloadConsolidation(base.TestCase):
        model = self.fake_cluster.generate_scenario_1()
        self.m_model.return_value = model
        self.fake_metrics.model = model
        node_0 = model.get_node_from_id("Node_0")
        node_0 = model.get_node_by_uuid("Node_0")
        node_util = dict(cpu=40, ram=64, disk=250)
        self.assertEqual(node_util,
                         self.strategy.get_node_capacity(node_0, model))
@ -98,7 +98,7 @@ class TestVMWorkloadConsolidation(base.TestCase):
        model = self.fake_cluster.generate_scenario_1()
        self.m_model.return_value = model
        self.fake_metrics.model = model
        node = model.get_node_from_id('Node_0')
        node = model.get_node_by_uuid('Node_0')
        rhu = self.strategy.get_relative_node_utilization(
            node, model)
        expected_rhu = {'disk': 0.04, 'ram': 0.015625, 'cpu': 0.025}
@ -116,8 +116,8 @@ class TestVMWorkloadConsolidation(base.TestCase):
        model = self.fake_cluster.generate_scenario_1()
        self.m_model.return_value = model
        self.fake_metrics.model = model
        n1 = model.get_node_from_id('Node_0')
        n2 = model.get_node_from_id('Node_1')
        n1 = model.get_node_by_uuid('Node_0')
        n2 = model.get_node_by_uuid('Node_1')
        instance_uuid = 'INSTANCE_0'
        self.strategy.add_migration(instance_uuid, n1, n2, model)
        self.assertEqual(1, len(self.strategy.solution.actions))
@ -132,7 +132,7 @@ class TestVMWorkloadConsolidation(base.TestCase):
        model = self.fake_cluster.generate_scenario_1()
        self.m_model.return_value = model
        self.fake_metrics.model = model
        n1 = model.get_node_from_id('Node_0')
        n1 = model.get_node_by_uuid('Node_0')
        cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
        res = self.strategy.is_overloaded(n1, model, cc)
        self.assertFalse(res)
@ -149,7 +149,7 @@ class TestVMWorkloadConsolidation(base.TestCase):
        model = self.fake_cluster.generate_scenario_1()
        self.m_model.return_value = model
        self.fake_metrics.model = model
        n = model.get_node_from_id('Node_1')
        n = model.get_node_by_uuid('Node_1')
        instance_uuid = 'INSTANCE_0'
        cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
        res = self.strategy.instance_fits(instance_uuid, n, model, cc)
@ -163,7 +163,7 @@ class TestVMWorkloadConsolidation(base.TestCase):
        model = self.fake_cluster.generate_scenario_1()
        self.m_model.return_value = model
        self.fake_metrics.model = model
        n = model.get_node_from_id('Node_0')
        n = model.get_node_by_uuid('Node_0')
        self.strategy.add_action_enable_compute_node(n)
        expected = [{'action_type': 'change_nova_service_state',
                     'input_parameters': {'state': 'enabled',
@ -174,7 +174,7 @@ class TestVMWorkloadConsolidation(base.TestCase):
        model = self.fake_cluster.generate_scenario_1()
        self.m_model.return_value = model
        self.fake_metrics.model = model
        n = model.get_node_from_id('Node_0')
        n = model.get_node_by_uuid('Node_0')
        self.strategy.add_action_disable_node(n)
        expected = [{'action_type': 'change_nova_service_state',
                     'input_parameters': {'state': 'disabled',
@ -185,8 +185,8 @@ class TestVMWorkloadConsolidation(base.TestCase):
        model = self.fake_cluster.generate_scenario_1()
        self.m_model.return_value = model
        self.fake_metrics.model = model
        n1 = model.get_node_from_id('Node_0')
        n2 = model.get_node_from_id('Node_1')
        n1 = model.get_node_by_uuid('Node_0')
        n2 = model.get_node_by_uuid('Node_1')
        instance_uuid = 'INSTANCE_0'
        self.strategy.disable_unused_nodes(model)
        self.assertEqual(0, len(self.strategy.solution.actions))
@ -214,8 +214,8 @@ class TestVMWorkloadConsolidation(base.TestCase):
        model = self.fake_cluster.generate_scenario_1()
        self.m_model.return_value = model
        self.fake_metrics.model = model
        n1 = model.get_node_from_id('Node_0')
        n2 = model.get_node_from_id('Node_1')
        n1 = model.get_node_by_uuid('Node_0')
        n2 = model.get_node_by_uuid('Node_1')
        instance_uuid = 'INSTANCE_0'
        cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
        self.strategy.consolidation_phase(model, cc)
@ -230,7 +230,7 @@ class TestVMWorkloadConsolidation(base.TestCase):
        model = self.fake_cluster.generate_scenario_2()
        self.m_model.return_value = model
        self.fake_metrics.model = model
        n1 = model.get_node_from_id('Node_0')
        n1 = model.get_node_by_uuid('Node_0')
        cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
        self.strategy.offload_phase(model, cc)
        self.strategy.consolidation_phase(model, cc)
@ -254,8 +254,8 @@ class TestVMWorkloadConsolidation(base.TestCase):
        model = self.fake_cluster.generate_scenario_3()
        self.m_model.return_value = model
        self.fake_metrics.model = model
        n1 = model.get_node_from_id('Node_0')
        n2 = model.get_node_from_id('Node_1')
        n1 = model.get_node_by_uuid('Node_0')
        n2 = model.get_node_by_uuid('Node_1')
        cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}
        self.strategy.offload_phase(model, cc)
        expected = [{'action_type': 'migrate',
@ -66,10 +66,10 @@ class TestWorkloadBalance(base.TestCase):
    def test_calc_used_res(self):
        model = self.fake_cluster.generate_scenario_6_with_2_nodes()
        self.m_model.return_value = model
        node = model.get_node_from_id('Node_0')
        cap_cores = model.get_resource_from_id(element.ResourceType.cpu_cores)
        cap_mem = model.get_resource_from_id(element.ResourceType.memory)
        cap_disk = model.get_resource_from_id(element.ResourceType.disk)
        node = model.get_node_by_uuid('Node_0')
        cap_cores = model.get_resource_by_uuid(element.ResourceType.cpu_cores)
        cap_mem = model.get_resource_by_uuid(element.ResourceType.memory)
        cap_disk = model.get_resource_by_uuid(element.ResourceType.disk)
        cores_used, mem_used, disk_used = (
            self.strategy.calculate_used_resource(
                node, cap_cores, cap_mem, cap_disk))
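For reference, the `calc_used_res`/`calculate_used_resource` helpers exercised by these tests add up, per resource type, the capacity reserved by every instance mapped to a node. A rough sketch of that accumulation under assumed data shapes (`get_node_instances` and `get_capacity` are illustrative names, not necessarily Watcher's exact model API):

    def calculate_used_resource(node, cap_cores, cap_mem, cap_disk, mapping):
        """Sum the per-instance capacities hosted on 'node'."""
        cores_used = mem_used = disk_used = 0
        for instance in mapping.get_node_instances(node):
            # Each cap_* object resolves an instance to the amount of
            # that resource it reserves (vCPUs, RAM MB, disk GB).
            cores_used += cap_cores.get_capacity(instance)
            mem_used += cap_mem.get_capacity(instance)
            disk_used += cap_disk.get_capacity(instance)
        return cores_used, mem_used, disk_used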
@ -49,6 +49,8 @@ class TestExecuteBasicStrategy(base.BaseInfraOptimScenarioTest):
        enabled_compute_nodes = [cn for cn in cls.initial_compute_nodes_setup
                                 if cn.get('status') == 'enabled']

        cls.wait_for_compute_node_setup()

        if len(enabled_compute_nodes) < 2:
            raise cls.skipException(
                "Less than 2 compute nodes are enabled, "
@ -62,6 +64,32 @@ class TestExecuteBasicStrategy(base.BaseInfraOptimScenarioTest):
        return [srv for srv in available_services
                if srv.get('binary') == 'nova-compute']

    @classmethod
    def wait_for_compute_node_setup(cls):

        def _are_compute_nodes_setup():
            try:
                hypervisors_client = cls.mgr.hypervisor_client
                hypervisors = hypervisors_client.list_hypervisors(
                    detail=True)['hypervisors']
                available_hypervisors = set(
                    hyp['hypervisor_hostname'] for hyp in hypervisors)
                available_services = set(
                    service['host']
                    for service in cls.get_compute_nodes_setup())

                return (
                    available_hypervisors == available_services and
                    len(hypervisors) >= 2)
            except Exception:
                return False

        assert test.call_until_true(
            func=_are_compute_nodes_setup,
            duration=600,
            sleep_for=2
        )

    @classmethod
    def rollback_compute_nodes_status(cls):
        current_compute_nodes_setup = cls.get_compute_nodes_setup()
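The `wait_for_compute_node_setup` helper added above is a plain poll-until-true guard: it re-evaluates a cheap predicate until the predicate holds or a deadline expires, so the test only proceeds once every nova-compute service is matched by a registered hypervisor. A generic sketch of the same pattern without tempest's `test.call_until_true` (timings here are illustrative):

    import time


    def call_until_true(func, duration, sleep_for):
        """Re-run func() until it returns True or 'duration' seconds pass."""
        deadline = time.time() + duration
        while time.time() < deadline:
            if func():
                return True
            time.sleep(sleep_for)
        return False

    # Usage mirroring the test above: poll every 2 seconds for up to
    # 10 minutes before giving up.
    # assert call_until_true(func=_are_compute_nodes_setup,
    #                        duration=600, sleep_for=2)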
@ -107,6 +135,7 @@ class TestExecuteBasicStrategy(base.BaseInfraOptimScenarioTest):
        """
        self.addCleanup(self.rollback_compute_nodes_status)
        self._create_one_instance_per_host()

        _, goal = self.client.show_goal(self.BASIC_GOAL)
        _, strategy = self.client.show_strategy("basic")
        _, audit_template = self.create_audit_template(