diff --git a/tox.ini b/tox.ini
index 1c8f13340..978ac6fe7 100644
--- a/tox.ini
+++ b/tox.ini
@@ -20,7 +20,7 @@ commands =
commands =
doc8 doc/source/ CONTRIBUTING.rst HACKING.rst README.rst
flake8
- bandit -r watcher -x tests -n5 -ll
+ bandit -r watcher -x tests -n5 -ll -s B320
[testenv:venv]
setenv = PYTHONHASHSEED=0
@@ -66,4 +66,4 @@ commands = sphinx-build -a -W -E -d releasenotes/build/doctrees -b html releasen
[testenv:bandit]
deps = -r{toxinidir}/test-requirements.txt
-commands = bandit -r watcher -x tests -n5 -ll
+commands = bandit -r watcher -x tests -n5 -ll -s B320
diff --git a/watcher/decision_engine/model/model_root.py b/watcher/decision_engine/model/model_root.py
index cbaee0a1f..054a287ed 100644
--- a/watcher/decision_engine/model/model_root.py
+++ b/watcher/decision_engine/model/model_root.py
@@ -223,3 +223,53 @@ class ModelRoot(object):
root.append(self._build_instance_element(instance))
return etree.tostring(root, pretty_print=True).decode('utf-8')
+
+ @classmethod
+ def from_xml(cls, data):
+ model = cls()
+ root = etree.fromstring(data)
+
+ mem = element.Resource(element.ResourceType.memory)
+ num_cores = element.Resource(element.ResourceType.cpu_cores)
+ disk = element.Resource(element.ResourceType.disk)
+ disk_capacity = element.Resource(element.ResourceType.disk_capacity)
+ model.create_resource(mem)
+ model.create_resource(num_cores)
+ model.create_resource(disk)
+ model.create_resource(disk_capacity)
+
+ for cn in root.findall('.//ComputeNode'):
+ node = element.ComputeNode(cn.get('id'))
+ node.uuid = cn.get('uuid')
+ node.hostname = cn.get('hostname')
+ # set capacity
+ mem.set_capacity(node, int(cn.get(str(mem.name))))
+ disk.set_capacity(node, int(cn.get(str(disk.name))))
+ disk_capacity.set_capacity(
+ node, int(cn.get(str(disk_capacity.name))))
+ num_cores.set_capacity(node, int(cn.get(str(num_cores.name))))
+ node.state = cn.get('state')
+ node.status = cn.get('status')
+
+ model.add_node(node)
+
+ for inst in root.findall('.//Instance'):
+ instance = element.Instance()
+ instance.uuid = inst.get('uuid')
+ instance.state = inst.get('state')
+
+ mem.set_capacity(instance, int(inst.get(str(mem.name))))
+ disk.set_capacity(instance, int(inst.get(str(disk.name))))
+ disk_capacity.set_capacity(
+ instance, int(inst.get(str(disk_capacity.name))))
+ num_cores.set_capacity(
+ instance, int(inst.get(str(num_cores.name))))
+
+ parent = inst.getparent()
+ if parent.tag == 'ComputeNode':
+ node = model.get_node_by_uuid(parent.get('uuid'))
+ model.map_instance(instance, node)
+ else:
+ model.add_instance(instance)
+
+ return model
diff --git a/watcher/tests/decision_engine/audit/test_audit_handlers.py b/watcher/tests/decision_engine/audit/test_audit_handlers.py
index 1050f5d7f..20210f23f 100644
--- a/watcher/tests/decision_engine/audit/test_audit_handlers.py
+++ b/watcher/tests/decision_engine/audit/test_audit_handlers.py
@@ -25,8 +25,7 @@ from watcher.decision_engine.messaging import events
from watcher.decision_engine.model.collector import manager
from watcher.objects import audit as audit_objects
from watcher.tests.db import base
-from watcher.tests.decision_engine.strategy.strategies import \
- faker_cluster_state as faker
+from watcher.tests.decision_engine.model import faker_cluster_state as faker
from watcher.tests.objects import utils as obj_utils
diff --git a/watcher/tests/decision_engine/loading/test_collector_loader.py b/watcher/tests/decision_engine/loading/test_collector_loader.py
index d6de14c78..049c34801 100644
--- a/watcher/tests/decision_engine/loading/test_collector_loader.py
+++ b/watcher/tests/decision_engine/loading/test_collector_loader.py
@@ -23,8 +23,7 @@ from watcher.common import exception
from watcher.decision_engine.loading import default as default_loading
from watcher.tests import base
from watcher.tests import conf_fixture
-from watcher.tests.decision_engine.strategy.strategies import \
- faker_cluster_state
+from watcher.tests.decision_engine.model import faker_cluster_state
class TestClusterDataModelCollectorLoader(base.TestCase):
diff --git a/watcher/tests/decision_engine/messaging/test_audit_endpoint.py b/watcher/tests/decision_engine/messaging/test_audit_endpoint.py
index 64252f79a..3ee24627a 100644
--- a/watcher/tests/decision_engine/messaging/test_audit_endpoint.py
+++ b/watcher/tests/decision_engine/messaging/test_audit_endpoint.py
@@ -20,8 +20,7 @@ from watcher.decision_engine.audit import oneshot as oneshot_handler
from watcher.decision_engine.messaging import audit_endpoint
from watcher.decision_engine.model.collector import manager
from watcher.tests.db import base
-from watcher.tests.decision_engine.strategy.strategies \
- import faker_cluster_state
+from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.objects import utils as obj_utils
diff --git a/watcher/tests/decision_engine/model/data/scenario_1.xml b/watcher/tests/decision_engine/model/data/scenario_1.xml
new file mode 100644
index 000000000..e9039cc13
--- /dev/null
+++ b/watcher/tests/decision_engine/model/data/scenario_1.xml
@@ -0,0 +1,47 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/watcher/tests/decision_engine/model/data/scenario_1_with_metrics.xml b/watcher/tests/decision_engine/model/data/scenario_1_with_metrics.xml
new file mode 100644
index 000000000..12bd6efeb
--- /dev/null
+++ b/watcher/tests/decision_engine/model/data/scenario_1_with_metrics.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
diff --git a/watcher/tests/decision_engine/model/data/scenario_2_with_metrics.xml b/watcher/tests/decision_engine/model/data/scenario_2_with_metrics.xml
new file mode 100644
index 000000000..b401f5aaa
--- /dev/null
+++ b/watcher/tests/decision_engine/model/data/scenario_2_with_metrics.xml
@@ -0,0 +1,13 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/watcher/tests/decision_engine/model/data/scenario_3_with_2_nodes.xml b/watcher/tests/decision_engine/model/data/scenario_3_with_2_nodes.xml
new file mode 100644
index 000000000..a9ad7d896
--- /dev/null
+++ b/watcher/tests/decision_engine/model/data/scenario_3_with_2_nodes.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
diff --git a/watcher/tests/decision_engine/model/data/scenario_3_with_metrics.xml b/watcher/tests/decision_engine/model/data/scenario_3_with_metrics.xml
new file mode 100644
index 000000000..988940868
--- /dev/null
+++ b/watcher/tests/decision_engine/model/data/scenario_3_with_metrics.xml
@@ -0,0 +1,9 @@
+
+
+
+
+
+
+
+
+
diff --git a/watcher/tests/decision_engine/model/data/scenario_4_with_1_node_no_instance.xml b/watcher/tests/decision_engine/model/data/scenario_4_with_1_node_no_instance.xml
new file mode 100644
index 000000000..2bed67c9c
--- /dev/null
+++ b/watcher/tests/decision_engine/model/data/scenario_4_with_1_node_no_instance.xml
@@ -0,0 +1,3 @@
+
+
+
diff --git a/watcher/tests/decision_engine/model/data/scenario_5_with_instance_disk_0.xml b/watcher/tests/decision_engine/model/data/scenario_5_with_instance_disk_0.xml
new file mode 100644
index 000000000..d8027aa8f
--- /dev/null
+++ b/watcher/tests/decision_engine/model/data/scenario_5_with_instance_disk_0.xml
@@ -0,0 +1,5 @@
+
+
+
+
+
diff --git a/watcher/tests/decision_engine/model/data/scenario_6_with_2_nodes.xml b/watcher/tests/decision_engine/model/data/scenario_6_with_2_nodes.xml
new file mode 100644
index 000000000..d8b0d38a7
--- /dev/null
+++ b/watcher/tests/decision_engine/model/data/scenario_6_with_2_nodes.xml
@@ -0,0 +1,10 @@
+
+
+
+
+
+
+
+
+
+
diff --git a/watcher/tests/decision_engine/model/data/scenario_7_with_2_nodes.xml b/watcher/tests/decision_engine/model/data/scenario_7_with_2_nodes.xml
new file mode 100644
index 000000000..630d61858
--- /dev/null
+++ b/watcher/tests/decision_engine/model/data/scenario_7_with_2_nodes.xml
@@ -0,0 +1,10 @@
+
+
+
+
+
+
+
+
+
+
diff --git a/watcher/tests/decision_engine/model/faker_cluster_and_metrics.py b/watcher/tests/decision_engine/model/faker_cluster_and_metrics.py
new file mode 100644
index 000000000..c551621f8
--- /dev/null
+++ b/watcher/tests/decision_engine/model/faker_cluster_and_metrics.py
@@ -0,0 +1,154 @@
+# -*- encoding: utf-8 -*-
+#
+# Authors: Vojtech CIMA
+# Bruno GRAZIOLI
+# Sean MURPHY
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import mock
+
+from watcher.decision_engine.model.collector import base
+from watcher.decision_engine.model import element
+from watcher.decision_engine.model import model_root as modelroot
+
+
+class FakerModelCollector(base.BaseClusterDataModelCollector):
+
+ def __init__(self, config=None, osc=None):
+ if config is None:
+ config = mock.Mock()
+ super(FakerModelCollector, self).__init__(config)
+
+ @property
+ def notification_endpoints(self):
+ return []
+
+ def execute(self):
+ return self.generate_scenario_1()
+
+ def load_data(self, filename):
+ cwd = os.path.abspath(os.path.dirname(__file__))
+ data_folder = os.path.join(cwd, "data")
+
+ with open(os.path.join(data_folder, filename), 'rb') as xml_file:
+ xml_data = xml_file.read()
+
+ return xml_data
+
+ def load_model(self, filename):
+ return modelroot.ModelRoot.from_xml(self.load_data(filename))
+
+ def generate_scenario_1(self):
+ """Simulates cluster with 2 nodes and 2 instances using 1:1 mapping"""
+ return self.load_model('scenario_1_with_metrics.xml')
+
+ def generate_scenario_2(self):
+ """Simulates a cluster
+
+ With 4 nodes and 6 instances all mapped to a single node
+ """
+ return self.load_model('scenario_2_with_metrics.xml')
+
+ def generate_scenario_3(self):
+ """Simulates a cluster
+
+ With 4 nodes and 6 instances all mapped to one node
+ """
+ return self.load_model('scenario_3_with_metrics.xml')
+
+
+class FakeCeilometerMetrics(object):
+ def __init__(self, model):
+ self.model = model
+
+ def mock_get_statistics(self, resource_id, meter_name, period=3600,
+ aggregate='avg'):
+ if meter_name == "compute.node.cpu.percent":
+ return self.get_node_cpu_util(resource_id)
+ elif meter_name == "cpu_util":
+ return self.get_instance_cpu_util(resource_id)
+ elif meter_name == "memory.usage":
+ return self.get_instance_ram_util(resource_id)
+ elif meter_name == "disk.root.size":
+ return self.get_instance_disk_root_size(resource_id)
+
+ def get_node_cpu_util(self, r_id):
+        """Calculates node utilization dynamically.
+
+ node CPU utilization should consider
+        and correlate with actual instance-node mappings
+ provided within a cluster model.
+ Returns relative node CPU utilization <0, 100>.
+ :param r_id: resource id
+ """
+
+ id = '%s_%s' % (r_id.split('_')[0], r_id.split('_')[1])
+ instances = self.model.get_mapping().get_node_instances_by_uuid(id)
+ util_sum = 0.0
+ node_cpu_cores = self.model.get_resource_by_uuid(
+ element.ResourceType.cpu_cores).get_capacity_by_uuid(id)
+ for instance_uuid in instances:
+ instance_cpu_cores = self.model.get_resource_by_uuid(
+ element.ResourceType.cpu_cores).\
+ get_capacity(self.model.get_instance_by_uuid(instance_uuid))
+ total_cpu_util = instance_cpu_cores * self.get_instance_cpu_util(
+ instance_uuid)
+ util_sum += total_cpu_util / 100.0
+ util_sum /= node_cpu_cores
+ return util_sum * 100.0
+
+ def get_instance_cpu_util(self, r_id):
+ instance_cpu_util = dict()
+ instance_cpu_util['INSTANCE_0'] = 10
+ instance_cpu_util['INSTANCE_1'] = 30
+ instance_cpu_util['INSTANCE_2'] = 60
+ instance_cpu_util['INSTANCE_3'] = 20
+ instance_cpu_util['INSTANCE_4'] = 40
+ instance_cpu_util['INSTANCE_5'] = 50
+ instance_cpu_util['INSTANCE_6'] = 100
+ instance_cpu_util['INSTANCE_7'] = 100
+ instance_cpu_util['INSTANCE_8'] = 100
+ instance_cpu_util['INSTANCE_9'] = 100
+ return instance_cpu_util[str(r_id)]
+
+ def get_instance_ram_util(self, r_id):
+ instance_ram_util = dict()
+ instance_ram_util['INSTANCE_0'] = 1
+ instance_ram_util['INSTANCE_1'] = 2
+ instance_ram_util['INSTANCE_2'] = 4
+ instance_ram_util['INSTANCE_3'] = 8
+ instance_ram_util['INSTANCE_4'] = 3
+ instance_ram_util['INSTANCE_5'] = 2
+ instance_ram_util['INSTANCE_6'] = 1
+ instance_ram_util['INSTANCE_7'] = 2
+ instance_ram_util['INSTANCE_8'] = 4
+ instance_ram_util['INSTANCE_9'] = 8
+ return instance_ram_util[str(r_id)]
+
+ def get_instance_disk_root_size(self, r_id):
+ instance_disk_util = dict()
+ instance_disk_util['INSTANCE_0'] = 10
+ instance_disk_util['INSTANCE_1'] = 15
+ instance_disk_util['INSTANCE_2'] = 30
+ instance_disk_util['INSTANCE_3'] = 35
+ instance_disk_util['INSTANCE_4'] = 20
+ instance_disk_util['INSTANCE_5'] = 25
+ instance_disk_util['INSTANCE_6'] = 25
+ instance_disk_util['INSTANCE_7'] = 25
+ instance_disk_util['INSTANCE_8'] = 25
+ instance_disk_util['INSTANCE_9'] = 25
+ return instance_disk_util[str(r_id)]
diff --git a/watcher/tests/decision_engine/model/faker_cluster_state.py b/watcher/tests/decision_engine/model/faker_cluster_state.py
new file mode 100644
index 000000000..af1484629
--- /dev/null
+++ b/watcher/tests/decision_engine/model/faker_cluster_state.py
@@ -0,0 +1,150 @@
+# -*- encoding: utf-8 -*-
+# Copyright (c) 2015 b<>com
+#
+# Authors: Jean-Emile DARTOIS
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import mock
+
+from watcher.decision_engine.model.collector import base
+from watcher.decision_engine.model import element
+from watcher.decision_engine.model import model_root as modelroot
+
+
+class FakerModelCollector(base.BaseClusterDataModelCollector):
+
+ def __init__(self, config=None, osc=None):
+ if config is None:
+ config = mock.Mock(period=777)
+ super(FakerModelCollector, self).__init__(config)
+
+ @property
+ def notification_endpoints(self):
+ return []
+
+ def load_data(self, filename):
+ cwd = os.path.abspath(os.path.dirname(__file__))
+ data_folder = os.path.join(cwd, "data")
+
+ with open(os.path.join(data_folder, filename), 'rb') as xml_file:
+ xml_data = xml_file.read()
+
+ return xml_data
+
+ def load_model(self, filename):
+ return modelroot.ModelRoot.from_xml(self.load_data(filename))
+
+ def execute(self):
+ return self._cluster_data_model or self.build_scenario_1()
+
+ def build_scenario_1(self):
+ instances = []
+
+ current_state_cluster = modelroot.ModelRoot()
+ # number of nodes
+ node_count = 5
+ # number max of instance per node
+ node_instance_count = 7
+ # total number of virtual machine
+ instance_count = (node_count * node_instance_count)
+
+        # define resource (CPU, MEM, disk, ...)
+ mem = element.Resource(element.ResourceType.memory)
+ # 2199.954 Mhz
+ num_cores = element.Resource(element.ResourceType.cpu_cores)
+ disk = element.Resource(element.ResourceType.disk)
+ disk_capacity = element.Resource(element.ResourceType.disk_capacity)
+
+ current_state_cluster.create_resource(mem)
+ current_state_cluster.create_resource(num_cores)
+ current_state_cluster.create_resource(disk)
+ current_state_cluster.create_resource(disk_capacity)
+
+ for id_ in range(0, node_count):
+ node_uuid = "Node_{0}".format(id_)
+ node = element.ComputeNode(id_)
+ node.uuid = node_uuid
+ node.hostname = "hostname_{0}".format(id_)
+
+ mem.set_capacity(node, 132)
+ disk.set_capacity(node, 250)
+ disk_capacity.set_capacity(node, 250)
+ num_cores.set_capacity(node, 40)
+ current_state_cluster.add_node(node)
+
+ for i in range(0, instance_count):
+ instance_uuid = "INSTANCE_{0}".format(i)
+ instance = element.Instance()
+ instance.uuid = instance_uuid
+ mem.set_capacity(instance, 2)
+ disk.set_capacity(instance, 20)
+ disk_capacity.set_capacity(instance, 20)
+ num_cores.set_capacity(instance, 10)
+ instances.append(instance)
+ current_state_cluster.add_instance(instance)
+
+ current_state_cluster.mapping.map(
+ current_state_cluster.get_node_by_uuid("Node_0"),
+ current_state_cluster.get_instance_by_uuid("INSTANCE_0"))
+
+ current_state_cluster.mapping.map(
+ current_state_cluster.get_node_by_uuid("Node_0"),
+ current_state_cluster.get_instance_by_uuid("INSTANCE_1"))
+
+ current_state_cluster.mapping.map(
+ current_state_cluster.get_node_by_uuid("Node_1"),
+ current_state_cluster.get_instance_by_uuid("INSTANCE_2"))
+
+ current_state_cluster.mapping.map(
+ current_state_cluster.get_node_by_uuid("Node_2"),
+ current_state_cluster.get_instance_by_uuid("INSTANCE_3"))
+
+ current_state_cluster.mapping.map(
+ current_state_cluster.get_node_by_uuid("Node_2"),
+ current_state_cluster.get_instance_by_uuid("INSTANCE_4"))
+
+ current_state_cluster.mapping.map(
+ current_state_cluster.get_node_by_uuid("Node_2"),
+ current_state_cluster.get_instance_by_uuid("INSTANCE_5"))
+
+ current_state_cluster.mapping.map(
+ current_state_cluster.get_node_by_uuid("Node_3"),
+ current_state_cluster.get_instance_by_uuid("INSTANCE_6"))
+
+ current_state_cluster.mapping.map(
+ current_state_cluster.get_node_by_uuid("Node_4"),
+ current_state_cluster.get_instance_by_uuid("INSTANCE_7"))
+
+ return current_state_cluster
+
+ def generate_scenario_1(self):
+ return self.load_model('scenario_1.xml')
+
+ def generate_scenario_3_with_2_nodes(self):
+ return self.load_model('scenario_3_with_2_nodes.xml')
+
+ def generate_scenario_4_with_1_node_no_instance(self):
+ return self.load_model('scenario_4_with_1_node_no_instance.xml')
+
+ def generate_scenario_5_with_instance_disk_0(self):
+ return self.load_model('scenario_5_with_instance_disk_0.xml')
+
+ def generate_scenario_6_with_2_nodes(self):
+ return self.load_model('scenario_6_with_2_nodes.xml')
+
+ def generate_scenario_7_with_2_nodes(self):
+ return self.load_model('scenario_7_with_2_nodes.xml')
diff --git a/watcher/tests/decision_engine/strategy/strategies/faker_metrics_collector.py b/watcher/tests/decision_engine/model/faker_metrics_collector.py
similarity index 98%
rename from watcher/tests/decision_engine/strategy/strategies/faker_metrics_collector.py
rename to watcher/tests/decision_engine/model/faker_metrics_collector.py
index e7e1d8b5f..3fd93fec5 100644
--- a/watcher/tests/decision_engine/strategy/strategies/faker_metrics_collector.py
+++ b/watcher/tests/decision_engine/model/faker_metrics_collector.py
@@ -250,9 +250,3 @@ class FakerMetricsCollector(object):
def get_virtual_machine_capacity(self, instance_uuid):
return random.randint(1, 4)
-
- def get_average_network_incomming(self, node):
- pass
-
- def get_average_network_outcomming(self, node):
- pass
diff --git a/watcher/tests/decision_engine/model/notification/fake_managers.py b/watcher/tests/decision_engine/model/notification/fake_managers.py
index 59b652bd1..1054ef617 100644
--- a/watcher/tests/decision_engine/model/notification/fake_managers.py
+++ b/watcher/tests/decision_engine/model/notification/fake_managers.py
@@ -17,8 +17,7 @@
# limitations under the License.
from watcher.decision_engine.model.notification import nova as novanotification
-from watcher.tests.decision_engine.strategy.strategies \
- import faker_cluster_state
+from watcher.tests.decision_engine.model import faker_cluster_state
class FakeManager(object):
diff --git a/watcher/tests/decision_engine/model/notification/test_nova_notifications.py b/watcher/tests/decision_engine/model/notification/test_nova_notifications.py
index e766a7a97..fae9d9a43 100644
--- a/watcher/tests/decision_engine/model/notification/test_nova_notifications.py
+++ b/watcher/tests/decision_engine/model/notification/test_nova_notifications.py
@@ -29,9 +29,8 @@ from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.decision_engine.model.notification import nova as novanotification
from watcher.tests import base as base_test
+from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.decision_engine.model.notification import fake_managers
-from watcher.tests.decision_engine.strategy.strategies \
- import faker_cluster_state
class NotificationTestCase(base_test.TestCase):
diff --git a/watcher/tests/decision_engine/model/test_mapping.py b/watcher/tests/decision_engine/model/test_mapping.py
index f41514870..36e771226 100644
--- a/watcher/tests/decision_engine/model/test_mapping.py
+++ b/watcher/tests/decision_engine/model/test_mapping.py
@@ -20,8 +20,7 @@ import uuid
from watcher.decision_engine.model import element
from watcher.tests import base
-from watcher.tests.decision_engine.strategy.strategies import \
- faker_cluster_state
+from watcher.tests.decision_engine.model import faker_cluster_state
class TestMapping(base.TestCase):
diff --git a/watcher/tests/decision_engine/model/test_model.py b/watcher/tests/decision_engine/model/test_model.py
index 0e0844ad3..b995812ab 100644
--- a/watcher/tests/decision_engine/model/test_model.py
+++ b/watcher/tests/decision_engine/model/test_model.py
@@ -16,6 +16,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import os
+
from lxml import etree
from oslo_utils import uuidutils
import six
@@ -24,154 +26,32 @@ from watcher.common import exception
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.tests import base
-from watcher.tests.decision_engine.strategy.strategies \
- import faker_cluster_state
+from watcher.tests.decision_engine.model import faker_cluster_state
class TestModel(base.TestCase):
+ def load_data(self, filename):
+ cwd = os.path.abspath(os.path.dirname(__file__))
+ data_folder = os.path.join(cwd, "data")
+
+ with open(os.path.join(data_folder, filename), 'rb') as xml_file:
+ xml_data = xml_file.read()
+
+ return xml_data
+
+ def load_model(self, filename):
+ return model_root.ModelRoot.from_xml(self.load_data(filename))
+
def test_model_structure(self):
fake_cluster = faker_cluster_state.FakerModelCollector()
- model = fake_cluster.generate_scenario_1()
+ model = fake_cluster.build_scenario_1()
self.assertEqual(5, len(model._nodes))
self.assertEqual(35, len(model._instances))
self.assertEqual(5, len(model.mapping.get_mapping()))
- expected_struct_str = """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- """
+ expected_struct_str = self.load_data('scenario_1.xml')
parser = etree.XMLParser(remove_blank_text=True)
expected_struct = etree.fromstring(expected_struct_str, parser)
model_structure = etree.fromstring(model.to_string(), parser)
@@ -186,6 +66,15 @@ class TestModel(base.TestCase):
self.assertEqual(normalized_expected_struct, normalized_model_struct)
+ def test_build_model_from_xml(self):
+ fake_cluster = faker_cluster_state.FakerModelCollector()
+
+ expected_model = fake_cluster.generate_scenario_1()
+ struct_str = self.load_data('scenario_1.xml')
+
+ model = model_root.ModelRoot.from_xml(struct_str)
+ self.assertEqual(expected_model.to_string(), model.to_string())
+
def test_add_node(self):
model = model_root.ModelRoot()
uuid_ = "{0}".format(uuidutils.generate_uuid())
diff --git a/watcher/tests/decision_engine/planner/test_default_planner.py b/watcher/tests/decision_engine/planner/test_default_planner.py
index effd5269d..30c7da5b9 100644
--- a/watcher/tests/decision_engine/planner/test_default_planner.py
+++ b/watcher/tests/decision_engine/planner/test_default_planner.py
@@ -24,10 +24,8 @@ from watcher.decision_engine.strategy import strategies
from watcher import objects
from watcher.tests.db import base
from watcher.tests.db import utils as db_utils
-from watcher.tests.decision_engine.strategy.strategies \
- import faker_cluster_state
-from watcher.tests.decision_engine.strategy.strategies \
- import faker_metrics_collector as fake
+from watcher.tests.decision_engine.model import faker_cluster_state
+from watcher.tests.decision_engine.model import faker_metrics_collector as fake
from watcher.tests.objects import utils as obj_utils
diff --git a/watcher/tests/decision_engine/strategy/strategies/faker_cluster_and_metrics.py b/watcher/tests/decision_engine/strategy/strategies/faker_cluster_and_metrics.py
deleted file mode 100644
index 0b8f41682..000000000
--- a/watcher/tests/decision_engine/strategy/strategies/faker_cluster_and_metrics.py
+++ /dev/null
@@ -1,270 +0,0 @@
-# -*- encoding: utf-8 -*-
-#
-# Authors: Vojtech CIMA
-# Bruno GRAZIOLI
-# Sean MURPHY
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-
-from watcher.decision_engine.model.collector import base
-from watcher.decision_engine.model import element
-from watcher.decision_engine.model import model_root as modelroot
-
-
-class FakerModelCollector(base.BaseClusterDataModelCollector):
-
- def __init__(self, config=None, osc=None):
- if config is None:
- config = mock.Mock()
- super(FakerModelCollector, self).__init__(config)
-
- @property
- def notification_endpoints(self):
- return []
-
- def execute(self):
- return self.generate_scenario_1()
-
- def generate_scenario_1(self):
- """Simulates cluster with 2 nodes and 2 instances using 1:1 mapping"""
-
- current_state_cluster = modelroot.ModelRoot()
- count_node = 2
- count_instance = 2
-
- mem = element.Resource(element.ResourceType.memory)
- num_cores = element.Resource(element.ResourceType.cpu_cores)
- disk = element.Resource(element.ResourceType.disk)
- disk_capacity =\
- element.Resource(element.ResourceType.disk_capacity)
-
- current_state_cluster.create_resource(mem)
- current_state_cluster.create_resource(num_cores)
- current_state_cluster.create_resource(disk)
- current_state_cluster.create_resource(disk_capacity)
-
- for id_ in range(0, count_node):
- node_uuid = "Node_{0}".format(id_)
- node = element.ComputeNode(id_)
- node.uuid = node_uuid
- node.hostname = "hostname_{0}".format(id_)
- node.state = 'enabled'
-
- mem.set_capacity(node, 64)
- disk_capacity.set_capacity(node, 250)
- num_cores.set_capacity(node, 40)
- current_state_cluster.add_node(node)
-
- for i in range(0, count_instance):
- instance_uuid = "INSTANCE_{0}".format(i)
- instance = element.Instance()
- instance.uuid = instance_uuid
- instance.state = element.InstanceState.ACTIVE.value
- mem.set_capacity(instance, 2)
- disk.set_capacity(instance, 20)
- num_cores.set_capacity(instance, 10)
- current_state_cluster.add_instance(instance)
-
- current_state_cluster.get_mapping().map(
- current_state_cluster.get_node_by_uuid("Node_0"),
- current_state_cluster.get_instance_by_uuid("INSTANCE_0"))
-
- current_state_cluster.get_mapping().map(
- current_state_cluster.get_node_by_uuid("Node_1"),
- current_state_cluster.get_instance_by_uuid("INSTANCE_1"))
-
- return current_state_cluster
-
- def generate_scenario_2(self):
- """Simulates a cluster
-
- With 4 nodes and 6 instances all mapped to a single node
- """
-
- current_state_cluster = modelroot.ModelRoot()
- count_node = 4
- count_instance = 6
-
- mem = element.Resource(element.ResourceType.memory)
- num_cores = element.Resource(element.ResourceType.cpu_cores)
- disk = element.Resource(element.ResourceType.disk)
- disk_capacity =\
- element.Resource(element.ResourceType.disk_capacity)
-
- current_state_cluster.create_resource(mem)
- current_state_cluster.create_resource(num_cores)
- current_state_cluster.create_resource(disk)
- current_state_cluster.create_resource(disk_capacity)
-
- for id_ in range(0, count_node):
- node_uuid = "Node_{0}".format(id_)
- node = element.ComputeNode(id_)
- node.uuid = node_uuid
- node.hostname = "hostname_{0}".format(id_)
- node.state = 'up'
-
- mem.set_capacity(node, 64)
- disk_capacity.set_capacity(node, 250)
- num_cores.set_capacity(node, 16)
- current_state_cluster.add_node(node)
-
- for i in range(0, count_instance):
- instance_uuid = "INSTANCE_{0}".format(i)
- instance = element.Instance()
- instance.uuid = instance_uuid
- instance.state = element.InstanceState.ACTIVE.value
- mem.set_capacity(instance, 2)
- disk.set_capacity(instance, 20)
- num_cores.set_capacity(instance, 10)
- current_state_cluster.add_instance(instance)
-
- current_state_cluster.get_mapping().map(
- current_state_cluster.get_node_by_uuid("Node_0"),
- current_state_cluster.get_instance_by_uuid("INSTANCE_%s" % i))
-
- return current_state_cluster
-
- def generate_scenario_3(self):
- """Simulates a cluster
-
- With 4 nodes and 6 instances all mapped to one node
- """
-
- current_state_cluster = modelroot.ModelRoot()
- count_node = 2
- count_instance = 4
-
- mem = element.Resource(element.ResourceType.memory)
- num_cores = element.Resource(element.ResourceType.cpu_cores)
- disk = element.Resource(element.ResourceType.disk)
- disk_capacity =\
- element.Resource(element.ResourceType.disk_capacity)
-
- current_state_cluster.create_resource(mem)
- current_state_cluster.create_resource(num_cores)
- current_state_cluster.create_resource(disk)
- current_state_cluster.create_resource(disk_capacity)
-
- for id_ in range(0, count_node):
- node_uuid = "Node_{0}".format(id_)
- node = element.ComputeNode(id_)
- node.uuid = node_uuid
- node.hostname = "hostname_{0}".format(id_)
- node.state = 'up'
-
- mem.set_capacity(node, 64)
- disk_capacity.set_capacity(node, 250)
- num_cores.set_capacity(node, 10)
- current_state_cluster.add_node(node)
-
- for i in range(6, 6 + count_instance):
- instance_uuid = "INSTANCE_{0}".format(i)
- instance = element.Instance()
- instance.uuid = instance_uuid
- instance.state = element.InstanceState.ACTIVE.value
- mem.set_capacity(instance, 2)
- disk.set_capacity(instance, 20)
- num_cores.set_capacity(instance, 2 ** (i - 6))
- current_state_cluster.add_instance(instance)
-
- current_state_cluster.get_mapping().map(
- current_state_cluster.get_node_by_uuid("Node_0"),
- current_state_cluster.get_instance_by_uuid("INSTANCE_%s" % i))
-
- return current_state_cluster
-
-
-class FakeCeilometerMetrics(object):
- def __init__(self, model):
- self.model = model
-
- def mock_get_statistics(self, resource_id, meter_name, period=3600,
- aggregate='avg'):
- if meter_name == "compute.node.cpu.percent":
- return self.get_node_cpu_util(resource_id)
- elif meter_name == "cpu_util":
- return self.get_instance_cpu_util(resource_id)
- elif meter_name == "memory.usage":
- return self.get_instance_ram_util(resource_id)
- elif meter_name == "disk.root.size":
- return self.get_instance_disk_root_size(resource_id)
-
- def get_node_cpu_util(self, r_id):
- """Calculates node utilization dynamicaly.
-
- node CPU utilization should consider
- and corelate with actual instance-node mappings
- provided within a cluster model.
- Returns relative node CPU utilization <0, 100>.
- :param r_id: resource id
- """
-
- id = '%s_%s' % (r_id.split('_')[0], r_id.split('_')[1])
- instances = self.model.get_mapping().get_node_instances_by_uuid(id)
- util_sum = 0.0
- node_cpu_cores = self.model.get_resource_by_uuid(
- element.ResourceType.cpu_cores).get_capacity_by_uuid(id)
- for instance_uuid in instances:
- instance_cpu_cores = self.model.get_resource_by_uuid(
- element.ResourceType.cpu_cores).\
- get_capacity(self.model.get_instance_by_uuid(instance_uuid))
- total_cpu_util = instance_cpu_cores * self.get_instance_cpu_util(
- instance_uuid)
- util_sum += total_cpu_util / 100.0
- util_sum /= node_cpu_cores
- return util_sum * 100.0
-
- def get_instance_cpu_util(self, r_id):
- instance_cpu_util = dict()
- instance_cpu_util['INSTANCE_0'] = 10
- instance_cpu_util['INSTANCE_1'] = 30
- instance_cpu_util['INSTANCE_2'] = 60
- instance_cpu_util['INSTANCE_3'] = 20
- instance_cpu_util['INSTANCE_4'] = 40
- instance_cpu_util['INSTANCE_5'] = 50
- instance_cpu_util['INSTANCE_6'] = 100
- instance_cpu_util['INSTANCE_7'] = 100
- instance_cpu_util['INSTANCE_8'] = 100
- instance_cpu_util['INSTANCE_9'] = 100
- return instance_cpu_util[str(r_id)]
-
- def get_instance_ram_util(self, r_id):
- instance_ram_util = dict()
- instance_ram_util['INSTANCE_0'] = 1
- instance_ram_util['INSTANCE_1'] = 2
- instance_ram_util['INSTANCE_2'] = 4
- instance_ram_util['INSTANCE_3'] = 8
- instance_ram_util['INSTANCE_4'] = 3
- instance_ram_util['INSTANCE_5'] = 2
- instance_ram_util['INSTANCE_6'] = 1
- instance_ram_util['INSTANCE_7'] = 2
- instance_ram_util['INSTANCE_8'] = 4
- instance_ram_util['INSTANCE_9'] = 8
- return instance_ram_util[str(r_id)]
-
- def get_instance_disk_root_size(self, r_id):
- instance_disk_util = dict()
- instance_disk_util['INSTANCE_0'] = 10
- instance_disk_util['INSTANCE_1'] = 15
- instance_disk_util['INSTANCE_2'] = 30
- instance_disk_util['INSTANCE_3'] = 35
- instance_disk_util['INSTANCE_4'] = 20
- instance_disk_util['INSTANCE_5'] = 25
- instance_disk_util['INSTANCE_6'] = 25
- instance_disk_util['INSTANCE_7'] = 25
- instance_disk_util['INSTANCE_8'] = 25
- instance_disk_util['INSTANCE_9'] = 25
- return instance_disk_util[str(r_id)]
diff --git a/watcher/tests/decision_engine/strategy/strategies/faker_cluster_state.py b/watcher/tests/decision_engine/strategy/strategies/faker_cluster_state.py
deleted file mode 100644
index ead2b9c68..000000000
--- a/watcher/tests/decision_engine/strategy/strategies/faker_cluster_state.py
+++ /dev/null
@@ -1,414 +0,0 @@
-# -*- encoding: utf-8 -*-
-# Copyright (c) 2015 b<>com
-#
-# Authors: Jean-Emile DARTOIS
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import mock
-
-from watcher.decision_engine.model.collector import base
-from watcher.decision_engine.model import element
-from watcher.decision_engine.model import model_root as modelroot
-
-
-class FakerModelCollector(base.BaseClusterDataModelCollector):
-
- def __init__(self, config=None, osc=None):
- if config is None:
- config = mock.Mock(period=777)
- super(FakerModelCollector, self).__init__(config)
-
- @property
- def notification_endpoints(self):
- return []
-
- def execute(self):
- return self._cluster_data_model or self.generate_scenario_1()
-
- def generate_scenario_1(self):
- instances = []
-
- current_state_cluster = modelroot.ModelRoot()
- # number of nodes
- node_count = 5
- # number max of instance per node
- node_instance_count = 7
- # total number of virtual machine
- instance_count = (node_count * node_instance_count)
-
- # define ressouce ( CPU, MEM disk, ... )
- mem = element.Resource(element.ResourceType.memory)
- # 2199.954 Mhz
- num_cores = element.Resource(element.ResourceType.cpu_cores)
- disk = element.Resource(element.ResourceType.disk)
- disk_capacity = element.Resource(element.ResourceType.disk_capacity)
-
- current_state_cluster.create_resource(mem)
- current_state_cluster.create_resource(num_cores)
- current_state_cluster.create_resource(disk)
- current_state_cluster.create_resource(disk_capacity)
-
- for id_ in range(0, node_count):
- node_uuid = "Node_{0}".format(id_)
- node = element.ComputeNode(id_)
- node.uuid = node_uuid
- node.hostname = "hostname_{0}".format(id_)
-
- mem.set_capacity(node, 132)
- disk.set_capacity(node, 250)
- disk_capacity.set_capacity(node, 250)
- num_cores.set_capacity(node, 40)
- current_state_cluster.add_node(node)
-
- for i in range(0, instance_count):
- instance_uuid = "INSTANCE_{0}".format(i)
- instance = element.Instance()
- instance.uuid = instance_uuid
- mem.set_capacity(instance, 2)
- disk.set_capacity(instance, 20)
- disk_capacity.set_capacity(instance, 20)
- num_cores.set_capacity(instance, 10)
- instances.append(instance)
- current_state_cluster.add_instance(instance)
-
- current_state_cluster.get_mapping().map(
- current_state_cluster.get_node_by_uuid("Node_0"),
- current_state_cluster.get_instance_by_uuid("INSTANCE_0"))
-
- current_state_cluster.get_mapping().map(
- current_state_cluster.get_node_by_uuid("Node_0"),
- current_state_cluster.get_instance_by_uuid("INSTANCE_1"))
-
- current_state_cluster.get_mapping().map(
- current_state_cluster.get_node_by_uuid("Node_1"),
- current_state_cluster.get_instance_by_uuid("INSTANCE_2"))
-
- current_state_cluster.get_mapping().map(
- current_state_cluster.get_node_by_uuid("Node_2"),
- current_state_cluster.get_instance_by_uuid("INSTANCE_3"))
-
- current_state_cluster.get_mapping().map(
- current_state_cluster.get_node_by_uuid("Node_2"),
- current_state_cluster.get_instance_by_uuid("INSTANCE_4"))
-
- current_state_cluster.get_mapping().map(
- current_state_cluster.get_node_by_uuid("Node_2"),
- current_state_cluster.get_instance_by_uuid("INSTANCE_5"))
-
- current_state_cluster.get_mapping().map(
- current_state_cluster.get_node_by_uuid("Node_3"),
- current_state_cluster.get_instance_by_uuid("INSTANCE_6"))
-
- current_state_cluster.get_mapping().map(
- current_state_cluster.get_node_by_uuid("Node_4"),
- current_state_cluster.get_instance_by_uuid("INSTANCE_7"))
-
- return current_state_cluster
-
- def map(self, model, h_id, instance_id):
- model.get_mapping().map(
- model.get_node_by_uuid(h_id),
- model.get_instance_by_uuid(instance_id))
-
- def generate_scenario_3_with_2_nodes(self):
- instances = []
-
- root = modelroot.ModelRoot()
- # number of nodes
- node_count = 2
-
- # define ressouce ( CPU, MEM disk, ... )
- mem = element.Resource(element.ResourceType.memory)
- # 2199.954 Mhz
- num_cores = element.Resource(element.ResourceType.cpu_cores)
- disk = element.Resource(element.ResourceType.disk)
- disk_capacity = element.Resource(element.ResourceType.disk_capacity)
-
- root.create_resource(mem)
- root.create_resource(num_cores)
- root.create_resource(disk)
- root.create_resource(disk_capacity)
-
- for id_ in range(0, node_count):
- node_uuid = "Node_{0}".format(id_)
- node = element.ComputeNode(id_)
- node.uuid = node_uuid
- node.hostname = "hostname_{0}".format(id_)
-
- mem.set_capacity(node, 132)
- disk.set_capacity(node, 250)
- disk_capacity.set_capacity(node, 250)
- num_cores.set_capacity(node, 40)
- root.add_node(node)
-
- instance1 = element.Instance()
- instance1.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
- mem.set_capacity(instance1, 2)
- disk.set_capacity(instance1, 20)
- disk_capacity.set_capacity(instance1, 20)
- num_cores.set_capacity(instance1, 10)
- instances.append(instance1)
- root.add_instance(instance1)
-
- instance2 = element.Instance()
- instance2.uuid = "a4cab39b-9828-413a-bf88-f76921bf1517"
- mem.set_capacity(instance2, 2)
- disk.set_capacity(instance2, 20)
- disk_capacity.set_capacity(instance2, 20)
- num_cores.set_capacity(instance2, 10)
- instances.append(instance2)
- root.add_instance(instance2)
-
- root.get_mapping().map(root.get_node_by_uuid("Node_0"),
- root.get_instance_by_uuid(str(instance1.uuid)))
-
- root.get_mapping().map(root.get_node_by_uuid("Node_1"),
- root.get_instance_by_uuid(str(instance2.uuid)))
-
- return root
-
- def generate_scenario_4_with_1_node_no_instance(self):
- current_state_cluster = modelroot.ModelRoot()
- # number of nodes
- node_count = 1
-
- # define ressouce ( CPU, MEM disk, ... )
- mem = element.Resource(element.ResourceType.memory)
- # 2199.954 Mhz
- num_cores = element.Resource(element.ResourceType.cpu_cores)
- disk = element.Resource(element.ResourceType.disk)
- disk_capacity = element.Resource(element.ResourceType.disk_capacity)
-
- current_state_cluster.create_resource(mem)
- current_state_cluster.create_resource(num_cores)
- current_state_cluster.create_resource(disk)
- current_state_cluster.create_resource(disk_capacity)
-
- for id_ in range(0, node_count):
- node_uuid = "Node_{0}".format(id_)
- node = element.ComputeNode(id_)
- node.uuid = node_uuid
- node.hostname = "hostname_{0}".format(id_)
-
- mem.set_capacity(node, 1)
- disk.set_capacity(node, 1)
- disk_capacity.set_capacity(node, 1)
- num_cores.set_capacity(node, 1)
- current_state_cluster.add_node(node)
-
- return current_state_cluster
-
- def generate_scenario_5_with_instance_disk_0(self):
- instances = []
- current_state_cluster = modelroot.ModelRoot()
- # number of nodes
- node_count = 1
- # number of instances
- instance_count = 1
-
- # define ressouce ( CPU, MEM disk, ... )
- mem = element.Resource(element.ResourceType.memory)
- # 2199.954 Mhz
- num_cores = element.Resource(element.ResourceType.cpu_cores)
- disk = element.Resource(element.ResourceType.disk)
- disk_capacity = element.Resource(element.ResourceType.disk_capacity)
-
- current_state_cluster.create_resource(mem)
- current_state_cluster.create_resource(num_cores)
- current_state_cluster.create_resource(disk)
- current_state_cluster.create_resource(disk_capacity)
-
- for id_ in range(0, node_count):
- node_uuid = "Node_{0}".format(id_)
- node = element.ComputeNode(id_)
- node.uuid = node_uuid
- node.hostname = "hostname_{0}".format(id_)
-
- mem.set_capacity(node, 4)
- disk.set_capacity(node, 4)
- disk_capacity.set_capacity(node, 4)
- num_cores.set_capacity(node, 4)
- current_state_cluster.add_node(node)
-
- for i in range(0, instance_count):
- instance_uuid = "INSTANCE_{0}".format(i)
- instance = element.Instance()
- instance.uuid = instance_uuid
- mem.set_capacity(instance, 2)
- disk.set_capacity(instance, 0)
- disk_capacity.set_capacity(instance, 0)
- num_cores.set_capacity(instance, 4)
- instances.append(instance)
- current_state_cluster.add_instance(instance)
-
- current_state_cluster.get_mapping().map(
- current_state_cluster.get_node_by_uuid("Node_0"),
- current_state_cluster.get_instance_by_uuid("INSTANCE_0"))
-
- return current_state_cluster
-
- def generate_scenario_6_with_2_nodes(self):
- instances = []
- root = modelroot.ModelRoot()
- # number of nodes
- node_count = 2
-
- # define ressouce ( CPU, MEM disk, ... )
- mem = element.Resource(element.ResourceType.memory)
- # 2199.954 Mhz
- num_cores = element.Resource(element.ResourceType.cpu_cores)
- disk = element.Resource(element.ResourceType.disk)
- disk_capacity = element.Resource(element.ResourceType.disk_capacity)
-
- root.create_resource(mem)
- root.create_resource(num_cores)
- root.create_resource(disk)
- root.create_resource(disk_capacity)
-
- for id_ in range(0, node_count):
- node_uuid = "Node_{0}".format(id_)
- node = element.ComputeNode(id_)
- node.uuid = node_uuid
- node.hostname = "hostname_{0}".format(id_)
-
- mem.set_capacity(node, 132)
- disk.set_capacity(node, 250)
- disk_capacity.set_capacity(node, 250)
- num_cores.set_capacity(node, 40)
- root.add_node(node)
-
- instance1 = element.Instance()
- instance1.uuid = "INSTANCE_1"
- mem.set_capacity(instance1, 2)
- disk.set_capacity(instance1, 20)
- disk_capacity.set_capacity(instance1, 20)
- num_cores.set_capacity(instance1, 10)
- instances.append(instance1)
- root.add_instance(instance1)
-
- instance11 = element.Instance()
- instance11.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
- mem.set_capacity(instance11, 2)
- disk.set_capacity(instance11, 20)
- disk_capacity.set_capacity(instance11, 20)
- num_cores.set_capacity(instance11, 10)
- instances.append(instance11)
- root.add_instance(instance11)
-
- instance2 = element.Instance()
- instance2.uuid = "INSTANCE_3"
- mem.set_capacity(instance2, 2)
- disk.set_capacity(instance2, 20)
- disk_capacity.set_capacity(instance2, 20)
- num_cores.set_capacity(instance2, 10)
- instances.append(instance2)
- root.add_instance(instance2)
-
- instance21 = element.Instance()
- instance21.uuid = "INSTANCE_4"
- mem.set_capacity(instance21, 2)
- disk.set_capacity(instance21, 20)
- disk_capacity.set_capacity(instance21, 20)
- num_cores.set_capacity(instance21, 10)
- instances.append(instance21)
- root.add_instance(instance21)
-
- root.get_mapping().map(root.get_node_by_uuid("Node_0"),
- root.get_instance_by_uuid(str(instance1.uuid)))
- root.get_mapping().map(root.get_node_by_uuid("Node_0"),
- root.get_instance_by_uuid(str(instance11.uuid)))
-
- root.get_mapping().map(root.get_node_by_uuid("Node_1"),
- root.get_instance_by_uuid(str(instance2.uuid)))
- root.get_mapping().map(root.get_node_by_uuid("Node_1"),
- root.get_instance_by_uuid(str(instance21.uuid)))
- return root
-
- def generate_scenario_7_with_2_nodes(self):
- instances = []
- root = modelroot.ModelRoot()
- # number of nodes
- count_node = 2
-
- # define ressouce ( CPU, MEM disk, ... )
- mem = element.Resource(element.ResourceType.memory)
- # 2199.954 Mhz
- num_cores = element.Resource(element.ResourceType.cpu_cores)
- disk = element.Resource(element.ResourceType.disk)
- disk_capacity = element.Resource(element.ResourceType.disk_capacity)
-
- root.create_resource(mem)
- root.create_resource(num_cores)
- root.create_resource(disk)
- root.create_resource(disk_capacity)
-
- for id_ in range(0, count_node):
- node_uuid = "Node_{0}".format(id_)
- node = element.ComputeNode(id_)
- node.uuid = node_uuid
- node.hostname = "hostname_{0}".format(id_)
-
- mem.set_capacity(node, 132)
- disk.set_capacity(node, 250)
- disk_capacity.set_capacity(node, 250)
- num_cores.set_capacity(node, 50)
- root.add_node(node)
-
- instance1 = element.Instance()
- instance1.uuid = "cae81432-1631-4d4e-b29c-6f3acdcde906"
- mem.set_capacity(instance1, 2)
- disk.set_capacity(instance1, 20)
- disk_capacity.set_capacity(instance1, 20)
- num_cores.set_capacity(instance1, 15)
- instances.append(instance1)
- root.add_instance(instance1)
-
- instance11 = element.Instance()
- instance11.uuid = "73b09e16-35b7-4922-804e-e8f5d9b740fc"
- mem.set_capacity(instance11, 2)
- disk.set_capacity(instance11, 20)
- disk_capacity.set_capacity(instance11, 20)
- num_cores.set_capacity(instance11, 10)
- instances.append(instance11)
- root.add_instance(instance11)
-
- instance2 = element.Instance()
- instance2.uuid = "INSTANCE_3"
- mem.set_capacity(instance2, 2)
- disk.set_capacity(instance2, 20)
- disk_capacity.set_capacity(instance2, 20)
- num_cores.set_capacity(instance2, 10)
- instances.append(instance2)
- root.add_instance(instance2)
-
- instance21 = element.Instance()
- instance21.uuid = "INSTANCE_4"
- mem.set_capacity(instance21, 2)
- disk.set_capacity(instance21, 20)
- disk_capacity.set_capacity(instance21, 20)
- num_cores.set_capacity(instance21, 10)
- instances.append(instance21)
- root.add_instance(instance21)
-
- root.get_mapping().map(root.get_node_by_uuid("Node_0"),
- root.get_instance_by_uuid(str(instance1.uuid)))
- root.get_mapping().map(root.get_node_by_uuid("Node_0"),
- root.get_instance_by_uuid(str(instance11.uuid)))
-
- root.get_mapping().map(root.get_node_by_uuid("Node_1"),
- root.get_instance_by_uuid(str(instance2.uuid)))
- root.get_mapping().map(root.get_node_by_uuid("Node_1"),
- root.get_instance_by_uuid(str(instance21.uuid)))
- return root
diff --git a/watcher/tests/decision_engine/strategy/strategies/test_basic_consolidation.py b/watcher/tests/decision_engine/strategy/strategies/test_basic_consolidation.py
index f36bc033b..1a1389a72 100644
--- a/watcher/tests/decision_engine/strategy/strategies/test_basic_consolidation.py
+++ b/watcher/tests/decision_engine/strategy/strategies/test_basic_consolidation.py
@@ -26,10 +26,8 @@ from watcher.decision_engine.model.collector import nova
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
-from watcher.tests.decision_engine.strategy.strategies \
- import faker_cluster_state
-from watcher.tests.decision_engine.strategy.strategies \
- import faker_metrics_collector
+from watcher.tests.decision_engine.model import faker_cluster_state
+from watcher.tests.decision_engine.model import faker_metrics_collector
class TestBasicConsolidation(base.TestCase):
diff --git a/watcher/tests/decision_engine/strategy/strategies/test_dummy_strategy.py b/watcher/tests/decision_engine/strategy/strategies/test_dummy_strategy.py
index 2878a3c44..57626c943 100644
--- a/watcher/tests/decision_engine/strategy/strategies/test_dummy_strategy.py
+++ b/watcher/tests/decision_engine/strategy/strategies/test_dummy_strategy.py
@@ -21,8 +21,7 @@ from watcher.common import utils
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
-from watcher.tests.decision_engine.strategy.strategies import \
- faker_cluster_state
+from watcher.tests.decision_engine.model import faker_cluster_state
class TestDummyStrategy(base.TestCase):
diff --git a/watcher/tests/decision_engine/strategy/strategies/test_dummy_with_scorer.py b/watcher/tests/decision_engine/strategy/strategies/test_dummy_with_scorer.py
index 11cf33162..cb6fa2877 100644
--- a/watcher/tests/decision_engine/strategy/strategies/test_dummy_with_scorer.py
+++ b/watcher/tests/decision_engine/strategy/strategies/test_dummy_with_scorer.py
@@ -21,8 +21,7 @@ from watcher.common import utils
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
-from watcher.tests.decision_engine.strategy.strategies import \
- faker_cluster_state
+from watcher.tests.decision_engine.model import faker_cluster_state
class TestDummyWithScorer(base.TestCase):
diff --git a/watcher/tests/decision_engine/strategy/strategies/test_outlet_temp_control.py b/watcher/tests/decision_engine/strategy/strategies/test_outlet_temp_control.py
index 89c8ac8e6..3f21c4d3a 100644
--- a/watcher/tests/decision_engine/strategy/strategies/test_outlet_temp_control.py
+++ b/watcher/tests/decision_engine/strategy/strategies/test_outlet_temp_control.py
@@ -26,10 +26,8 @@ from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
-from watcher.tests.decision_engine.strategy.strategies \
- import faker_cluster_state
-from watcher.tests.decision_engine.strategy.strategies \
- import faker_metrics_collector
+from watcher.tests.decision_engine.model import faker_cluster_state
+from watcher.tests.decision_engine.model import faker_metrics_collector
class TestOutletTempControl(base.TestCase):
diff --git a/watcher/tests/decision_engine/strategy/strategies/test_uniform_airflow.py b/watcher/tests/decision_engine/strategy/strategies/test_uniform_airflow.py
index c0a85a98b..9a1591e22 100644
--- a/watcher/tests/decision_engine/strategy/strategies/test_uniform_airflow.py
+++ b/watcher/tests/decision_engine/strategy/strategies/test_uniform_airflow.py
@@ -26,10 +26,8 @@ from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
-from watcher.tests.decision_engine.strategy.strategies \
- import faker_cluster_state
-from watcher.tests.decision_engine.strategy.strategies \
- import faker_metrics_collector
+from watcher.tests.decision_engine.model import faker_cluster_state
+from watcher.tests.decision_engine.model import faker_metrics_collector
class TestUniformAirflow(base.TestCase):
diff --git a/watcher/tests/decision_engine/strategy/strategies/test_vm_workload_consolidation.py b/watcher/tests/decision_engine/strategy/strategies/test_vm_workload_consolidation.py
index fcdb9976e..c3be01d64 100644
--- a/watcher/tests/decision_engine/strategy/strategies/test_vm_workload_consolidation.py
+++ b/watcher/tests/decision_engine/strategy/strategies/test_vm_workload_consolidation.py
@@ -24,8 +24,7 @@ from watcher.common import exception
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
-from watcher.tests.decision_engine.strategy.strategies \
- import faker_cluster_and_metrics
+from watcher.tests.decision_engine.model import faker_cluster_and_metrics
class TestVMWorkloadConsolidation(base.TestCase):
diff --git a/watcher/tests/decision_engine/strategy/strategies/test_workload_balance.py b/watcher/tests/decision_engine/strategy/strategies/test_workload_balance.py
index 6bed8d3eb..59ab549a0 100644
--- a/watcher/tests/decision_engine/strategy/strategies/test_workload_balance.py
+++ b/watcher/tests/decision_engine/strategy/strategies/test_workload_balance.py
@@ -26,10 +26,8 @@ from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
-from watcher.tests.decision_engine.strategy.strategies \
- import faker_cluster_state
-from watcher.tests.decision_engine.strategy.strategies \
- import faker_metrics_collector
+from watcher.tests.decision_engine.model import faker_cluster_state
+from watcher.tests.decision_engine.model import faker_metrics_collector
class TestWorkloadBalance(base.TestCase):
diff --git a/watcher/tests/decision_engine/strategy/strategies/test_workload_stabilization.py b/watcher/tests/decision_engine/strategy/strategies/test_workload_stabilization.py
index 130575624..16e4f79f2 100644
--- a/watcher/tests/decision_engine/strategy/strategies/test_workload_stabilization.py
+++ b/watcher/tests/decision_engine/strategy/strategies/test_workload_stabilization.py
@@ -23,10 +23,8 @@ from watcher.common import utils
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
-from watcher.tests.decision_engine.strategy.strategies \
- import faker_cluster_state
-from watcher.tests.decision_engine.strategy.strategies \
- import faker_metrics_collector
+from watcher.tests.decision_engine.model import faker_cluster_state
+from watcher.tests.decision_engine.model import faker_metrics_collector
class TestWorkloadStabilization(base.TestCase):
diff --git a/watcher/tests/decision_engine/test_scheduling.py b/watcher/tests/decision_engine/test_scheduling.py
index c8463f2cf..f414ad300 100644
--- a/watcher/tests/decision_engine/test_scheduling.py
+++ b/watcher/tests/decision_engine/test_scheduling.py
@@ -24,8 +24,7 @@ import mock
from watcher.decision_engine.loading import default as default_loading
from watcher.decision_engine import scheduling
from watcher.tests import base
-from watcher.tests.decision_engine.strategy.strategies import \
- faker_cluster_state
+from watcher.tests.decision_engine.model import faker_cluster_state
class TestDecisionEngineSchedulingService(base.TestCase):