Merge "Multi datasource support for Basic Consolidation"

Jenkins 2017-01-16 09:54:24 +00:00 committed by Gerrit Code Review
commit fc9eb6e995
12 changed files with 472 additions and 90 deletions

View File

@@ -245,22 +245,30 @@ Querying metrics
A large set of metrics, generated by OpenStack modules, can be used in your
strategy implementation. To collect these metrics, Watcher provides a
`Helper`_ for two data sources: `Ceilometer`_ and `Monasca`_. If you wish to
query metrics from a different data source, you can implement your own helper
and use it directly from within your new strategy. Strategies in Watcher keep
the cluster data models decoupled from the data sources, which means that you
may keep the former while changing the latter.

The recommended way to support a new data source is to implement a new helper
that encapsulates, within separate methods, the queries you need to perform.
To use it, you then only have to instantiate it within your strategy, as in
the sketch below.
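For illustration only, a minimal helper could look like the following sketch
(the class name and client wiring are hypothetical, not part of this commit):

.. code-block:: python

    class MyDatasourceHelper(object):
        """Illustrative helper encapsulating metric queries."""

        def __init__(self, osc=None):
            # 'osc' is the OpenStack clients wrapper, as used by the
            # existing Ceilometer and Monasca helpers.
            self.osc = osc

        def statistic_aggregation(self, resource_id, meter_name,
                                  period=7200, aggregate='avg'):
            # Query your metrics backend here and return a single float,
            # or None when no data point exists.
            raise NotImplementedError

Within your strategy, you would then simply instantiate it, for example as
``self._helper = MyDatasourceHelper(osc=self.osc)``.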
If you want to use Ceilometer but with your own metrics database backend,
please refer to the `Ceilometer developer guide`_. The list of the available
Ceilometer backends is located here_. The `Ceilosca`_ project is a good
example of how to create your own pluggable backend. Moreover, if your
strategy requires new metrics not covered by Ceilometer, you can add them
through a `Ceilometer plugin`_.
.. _`Helper`: https://github.com/openstack/watcher/blob/master/watcher/decision_engine/cluster/history/ceilometer.py
.. _`Ceilometer`: http://docs.openstack.org/developer/ceilometer/
.. _`Monasca`: https://github.com/openstack/monasca-api/blob/master/docs/monasca-api-spec.md
.. _`Ceilometer developer guide`: http://docs.openstack.org/developer/ceilometer/architecture.html#storing-the-data
.. _`here`: http://docs.openstack.org/developer/ceilometer/install/dbreco.html#choosing-a-database-backend
.. _`Ceilometer plugin`: http://docs.openstack.org/developer/ceilometer/plugins.html
.. _`Ceilosca`: https://github.com/openstack/monasca-ceilometer/blob/master/ceilosca/ceilometer/storage/impl_monasca.py

View File

@@ -374,6 +374,11 @@ class NoMetricValuesForInstance(WatcherException):
    msg_fmt = _("No values returned by %(resource_id)s for %(metric_name)s.")


class UnsupportedDataSource(UnsupportedError):
    msg_fmt = _("Datasource %(datasource)s is not supported "
                "by strategy %(strategy)s")


class NoSuchMetricForHost(WatcherException):
    msg_fmt = _("No %(metric)s metric for %(host)s found.")

View File

@@ -35,11 +35,13 @@ migration is possible on your OpenStack cluster.
"""

from oslo_config import cfg
from oslo_log import log

from watcher._i18n import _, _LE, _LI, _LW
from watcher.common import exception
from watcher.datasource import ceilometer as ceil
from watcher.datasource import monasca as mon
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base
@@ -52,6 +54,15 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
    HOST_CPU_USAGE_METRIC_NAME = 'compute.node.cpu.percent'
    INSTANCE_CPU_USAGE_METRIC_NAME = 'cpu_util'

    METRIC_NAMES = dict(
        ceilometer=dict(
            host_cpu_usage='compute.node.cpu.percent',
            instance_cpu_usage='cpu_util'),
        monasca=dict(
            host_cpu_usage='cpu.percent',
            instance_cpu_usage='vm.cpu.utilization_perc'),
    )

    MIGRATION = "migrate"
    CHANGE_NOVA_SERVICE_STATE = "change_nova_service_state"
@@ -73,6 +84,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
        self.efficacy = 100
        self._ceilometer = None
        self._monasca = None

        # TODO(jed): improve threshold overbooking?
        self.threshold_mem = 1
@@ -111,6 +123,16 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
            },
        }

    @classmethod
    def get_config_opts(cls):
        return [
            cfg.StrOpt(
                "datasource",
                help="Data source to use in order to query the needed metrics",
                default="ceilometer",
                choices=["ceilometer", "monasca"]),
        ]
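A usage sketch of this option, mirroring the unit tests further down in this
commit (the mock-based construction is illustrative, not how the decision
engine loads strategies in production):

    import mock

    from watcher.decision_engine.strategy import strategies

    # Build the strategy with a config whose 'datasource' is "monasca";
    # the CPU usage methods below will then query self.monasca using the
    # metric names taken from METRIC_NAMES['monasca'].
    strategy = strategies.BasicConsolidation(
        config=mock.Mock(datasource="monasca"))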
    @property
    def ceilometer(self):
        if self._ceilometer is None:
@@ -121,6 +143,16 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
    def ceilometer(self, ceilometer):
        self._ceilometer = ceilometer

    @property
    def monasca(self):
        if self._monasca is None:
            self._monasca = mon.MonascaHelper(osc=self.osc)
        return self._monasca

    @monasca.setter
    def monasca(self, monasca):
        self._monasca = monasca

    def check_migration(self, source_node, destination_node,
                        instance_to_migrate):
        """Check if the migration is possible
@@ -221,6 +253,64 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
        # TODO(jed): take into account weight
        return (score_cores + score_disk + score_memory) / 3

    def get_node_cpu_usage(self, node):
        metric_name = self.METRIC_NAMES[
            self.config.datasource]['host_cpu_usage']
        if self.config.datasource == "ceilometer":
            resource_id = "%s_%s" % (node.uuid, node.hostname)
            return self.ceilometer.statistic_aggregation(
                resource_id=resource_id,
                meter_name=metric_name,
                period="7200",
                aggregate='avg',
            )
        elif self.config.datasource == "monasca":
            statistics = self.monasca.statistic_aggregation(
                meter_name=metric_name,
                dimensions=dict(hostname=node.uuid),
                period=7200,
                aggregate='avg'
            )
            cpu_usage = None
            for stat in statistics:
                avg_col_idx = stat['columns'].index('avg')
                values = [r[avg_col_idx] for r in stat['statistics']]
                value = float(sum(values)) / len(values)
                cpu_usage = value
            return cpu_usage

        raise exception.UnsupportedDataSource(
            strategy=self.name, datasource=self.config.datasource)

    def get_instance_cpu_usage(self, instance):
        metric_name = self.METRIC_NAMES[
            self.config.datasource]['instance_cpu_usage']
        if self.config.datasource == "ceilometer":
            return self.ceilometer.statistic_aggregation(
                resource_id=instance.uuid,
                meter_name=metric_name,
                period="7200",
                aggregate='avg'
            )
        elif self.config.datasource == "monasca":
            statistics = self.monasca.statistic_aggregation(
                meter_name=metric_name,
                dimensions=dict(resource_id=instance.uuid),
                period=7200,
                aggregate='avg'
            )
            cpu_usage = None
            for stat in statistics:
                avg_col_idx = stat['columns'].index('avg')
                values = [r[avg_col_idx] for r in stat['statistics']]
                value = float(sum(values)) / len(values)
                cpu_usage = value
            return cpu_usage

        raise exception.UnsupportedDataSource(
            strategy=self.name, datasource=self.config.datasource)
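The Monasca branches above expect statistic_aggregation() to return a list of
statistics payloads, each carrying a 'columns' header row and matching
'statistics' rows. A minimal sketch of the shape being parsed (the values and
the timestamp column are illustrative; the fake metrics added later in this
commit use the same layout):

    statistics = [{
        'columns': ['timestamp', 'avg'],
        'statistics': [
            ['2017-01-16T09:00:00Z', 12.0],
            ['2017-01-16T10:00:00Z', 16.0],
        ],
    }]
    # The 'avg' column index is looked up first, then every row's value
    # is averaged: (12.0 + 16.0) / 2 == 14.0 here.
    avg_col_idx = statistics[0]['columns'].index('avg')
    values = [r[avg_col_idx] for r in statistics[0]['statistics']]
    cpu_usage = float(sum(values)) / len(values)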
    def calculate_score_node(self, node):
        """Calculate the score that represents the utilization level
@@ -228,19 +318,16 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
        :return: Score for the given compute node
        :rtype: float
        """
        host_avg_cpu_util = self.get_node_cpu_usage(node)
        if host_avg_cpu_util is None:
            resource_id = "%s_%s" % (node.uuid, node.hostname)
            LOG.error(
                _LE("No values returned by %(resource_id)s "
                    "for %(metric_name)s") % dict(
                    resource_id=resource_id,
                    metric_name=self.METRIC_NAMES[
                        self.config.datasource]['host_cpu_usage']))
            host_avg_cpu_util = 100

        cpu_capacity = self.compute_model.get_resource_by_uuid(
@@ -253,7 +340,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
    def calculate_migration_efficacy(self):
        """Calculate migration efficacy

        :return: The efficacy tells us that every instance migration
                 resulted in releasing one node
        """
        if self.number_of_migrations > 0:
@@ -268,19 +355,14 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
        :param instance: the virtual machine
        :return: score
        """
        instance_cpu_utilization = self.get_instance_cpu_usage(instance)
        if instance_cpu_utilization is None:
            LOG.error(
                _LE("No values returned by %(resource_id)s "
                    "for %(metric_name)s") % dict(
                    resource_id=instance.uuid,
                    metric_name=self.METRIC_NAMES[
                        self.config.datasource]['instance_cpu_usage']))
            instance_cpu_utilization = 100

        cpu_capacity = self.compute_model.get_resource_by_uuid(

View File

@@ -16,12 +16,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import oslo_utils


class FakeCeilometerMetrics(object):
    def __init__(self):
        self.emptytype = ""
@@ -46,19 +44,20 @@ class FakerMetricsCollector(object):
        elif meter_name == "hardware.ipmi.node.airflow":
            result = self.get_average_airflow(resource_id)
        elif meter_name == "hardware.ipmi.node.temperature":
            result = self.get_average_inlet_t(resource_id)
        elif meter_name == "hardware.ipmi.node.power":
            result = self.get_average_power(resource_id)
        return result

    def mock_get_statistics_wb(self, resource_id, meter_name, period,
                               aggregate='avg'):
        result = 0.0
        if meter_name == "cpu_util":
            result = self.get_average_usage_instance_cpu_wb(resource_id)
        return result

    @staticmethod
    def get_average_outlet_temperature(uuid):
        """The average outlet temperature for host"""
        mock = {}
        mock['Node_0'] = 30
@@ -68,14 +67,15 @@ class FakerMetricsCollector(object):
        mock[uuid] = 100
        return mock[str(uuid)]

    @staticmethod
    def get_usage_node_ram(uuid):
        mock = {}
        # Ceilometer returns hardware.memory.used samples in KB.
        mock['Node_0'] = 7 * oslo_utils.units.Ki
        mock['Node_1'] = 5 * oslo_utils.units.Ki
        mock['Node_2'] = 29 * oslo_utils.units.Ki
        mock['Node_3'] = 8 * oslo_utils.units.Ki
        mock['Node_4'] = 4 * oslo_utils.units.Ki

        if uuid not in mock.keys():
            # mock[uuid] = random.randint(1, 4)
@@ -83,7 +83,8 @@ class FakerMetricsCollector(object):
        return float(mock[str(uuid)])

    @staticmethod
    def get_average_airflow(uuid):
        """The average airflow for host"""
        mock = {}
        mock['Node_0'] = 400
@@ -93,7 +94,8 @@ class FakerMetricsCollector(object):
        mock[uuid] = 200
        return mock[str(uuid)]

    @staticmethod
    def get_average_inlet_t(uuid):
        """The average inlet temperature for host"""
        mock = {}
        mock['Node_0'] = 24
@@ -102,7 +104,8 @@ class FakerMetricsCollector(object):
        mock[uuid] = 28
        return mock[str(uuid)]

    @staticmethod
    def get_average_power(uuid):
        """The average power for host"""
        mock = {}
        mock['Node_0'] = 260
@@ -111,12 +114,13 @@ class FakerMetricsCollector(object):
        mock[uuid] = 200
        return mock[str(uuid)]

    @staticmethod
    def get_usage_node_cpu(uuid):
        """The last node CPU usage values to average

        :param uuid:
        :return:
        """
        # query influxdb stream
        # compute in stream
@@ -151,12 +155,13 @@ class FakerMetricsCollector(object):
        return float(mock[str(uuid)])

    @staticmethod
    def get_average_usage_instance_cpu_wb(uuid):
        """The last VM CPU usage values to average

        :param uuid:
        :return:
        """
        # query influxdb stream
        # compute in stream
@@ -171,7 +176,8 @@ class FakerMetricsCollector(object):
        mock['INSTANCE_4'] = 10
        return float(mock[str(uuid)])

    @staticmethod
    def get_average_usage_instance_cpu(uuid):
        """The last VM CPU usage values to average

        :param uuid:
@@ -204,7 +210,8 @@ class FakerMetricsCollector(object):
        return mock[str(uuid)]

    @staticmethod
    def get_average_usage_instance_memory(uuid):
        mock = {}
        # node 0
        mock['INSTANCE_0'] = 2
@@ -227,7 +234,8 @@ class FakerMetricsCollector(object):
        return mock[str(uuid)]

    @staticmethod
    def get_average_usage_instance_disk(uuid):
        mock = {}
        # node 0
        mock['INSTANCE_0'] = 2
@@ -250,6 +258,3 @@ class FakerMetricsCollector(object):
        mock[uuid] = 4
        return mock[str(uuid)]

View File

@@ -102,12 +102,11 @@ class FakeCeilometerMetrics(object):
        Returns relative node CPU utilization <0, 100>.
        :param r_id: resource id
        """
        uuid = '%s_%s' % (r_id.split('_')[0], r_id.split('_')[1])
        instances = self.model.get_mapping().get_node_instances_by_uuid(uuid)
        util_sum = 0.0
        node_cpu_cores = self.model.get_resource_by_uuid(
            element.ResourceType.cpu_cores).get_capacity_by_uuid(uuid)
        for instance_uuid in instances:
            instance_cpu_cores = self.model.get_resource_by_uuid(
                element.ResourceType.cpu_cores).\
@@ -118,7 +117,8 @@ class FakeCeilometerMetrics(object):
        util_sum /= node_cpu_cores
        return util_sum * 100.0

    @staticmethod
    def get_instance_cpu_util(r_id):
        instance_cpu_util = dict()
        instance_cpu_util['INSTANCE_0'] = 10
        instance_cpu_util['INSTANCE_1'] = 30
@@ -132,7 +132,8 @@ class FakeCeilometerMetrics(object):
        instance_cpu_util['INSTANCE_9'] = 100
        return instance_cpu_util[str(r_id)]

    @staticmethod
    def get_instance_ram_util(r_id):
        instance_ram_util = dict()
        instance_ram_util['INSTANCE_0'] = 1
        instance_ram_util['INSTANCE_1'] = 2
@@ -146,7 +147,8 @@ class FakeCeilometerMetrics(object):
        instance_ram_util['INSTANCE_9'] = 8
        return instance_ram_util[str(r_id)]

    @staticmethod
    def get_instance_disk_root_size(r_id):
        instance_disk_util = dict()
        instance_disk_util['INSTANCE_0'] = 10
        instance_disk_util['INSTANCE_1'] = 15

View File

@@ -0,0 +1,267 @@
# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Authors: Jean-Emile DARTOIS <jean-emile.dartois@b-com.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oslo_utils

class FakeMonascaMetrics(object):
    def __init__(self):
        self.emptytype = ""

    def empty_one_metric(self, emptytype):
        self.emptytype = emptytype

    def mock_get_statistics(self, meter_name, dimensions, period,
                            aggregate='avg'):
        resource_id = dimensions.get(
            "resource_id") or dimensions.get("hostname")
        result = 0.0
        if meter_name == "cpu.percent":
            result = self.get_usage_node_cpu(resource_id)
        elif meter_name == "vm.cpu.utilization_perc":
            result = self.get_average_usage_instance_cpu(resource_id)
        # elif meter_name == "hardware.memory.used":
        #     result = self.get_usage_node_ram(resource_id)
        # elif meter_name == "memory.resident":
        #     result = self.get_average_usage_instance_memory(resource_id)
        # elif meter_name == "hardware.ipmi.node.outlet_temperature":
        #     result = self.get_average_outlet_temperature(resource_id)
        # elif meter_name == "hardware.ipmi.node.airflow":
        #     result = self.get_average_airflow(resource_id)
        # elif meter_name == "hardware.ipmi.node.temperature":
        #     result = self.get_average_inlet_t(resource_id)
        # elif meter_name == "hardware.ipmi.node.power":
        #     result = self.get_average_power(resource_id)
        return result

    def mock_get_statistics_wb(self, meter_name, dimensions, period,
                               aggregate='avg'):
        resource_id = dimensions.get(
            "resource_id") or dimensions.get("hostname")
        result = 0.0
        if meter_name == "vm.cpu.utilization_perc":
            result = self.get_average_usage_instance_cpu_wb(resource_id)
        return result

    @staticmethod
    def get_average_outlet_temperature(uuid):
        """The average outlet temperature for host"""
        measurements = {}
        measurements['Node_0'] = 30
        # use a big value to make sure it exceeds threshold
        measurements['Node_1'] = 100
        if uuid not in measurements.keys():
            measurements[uuid] = 100
        return [{'columns': ['avg'],
                 'statistics': [[float(measurements[str(uuid)])]]}]

    @staticmethod
    def get_usage_node_ram(uuid):
        measurements = {}
        # Monasca returns hardware.memory.used samples in KB.
        measurements['Node_0'] = 7 * oslo_utils.units.Ki
        measurements['Node_1'] = 5 * oslo_utils.units.Ki
        measurements['Node_2'] = 29 * oslo_utils.units.Ki
        measurements['Node_3'] = 8 * oslo_utils.units.Ki
        measurements['Node_4'] = 4 * oslo_utils.units.Ki

        if uuid not in measurements.keys():
            # measurements[uuid] = random.randint(1, 4)
            measurements[uuid] = 8
        return float(measurements[str(uuid)])

    @staticmethod
    def get_average_airflow(uuid):
        """The average airflow for host"""
        measurements = {}
        measurements['Node_0'] = 400
        # use a big value to make sure it exceeds threshold
        measurements['Node_1'] = 100
        if uuid not in measurements.keys():
            measurements[uuid] = 200
        return [{'columns': ['avg'],
                 'statistics': [[float(measurements[str(uuid)])]]}]

    @staticmethod
    def get_average_inlet_t(uuid):
        """The average inlet temperature for host"""
        measurements = {}
        measurements['Node_0'] = 24
        measurements['Node_1'] = 26
        if uuid not in measurements.keys():
            measurements[uuid] = 28
        return [{'columns': ['avg'],
                 'statistics': [[float(measurements[str(uuid)])]]}]

    @staticmethod
    def get_average_power(uuid):
        """The average power for host"""
        measurements = {}
        measurements['Node_0'] = 260
        measurements['Node_1'] = 240
        if uuid not in measurements.keys():
            measurements[uuid] = 200
        return [{'columns': ['avg'],
                 'statistics': [[float(measurements[str(uuid)])]]}]

    @staticmethod
    def get_usage_node_cpu(uuid):
        """The last node CPU usage values to average

        :param uuid:
        :return:
        """
        measurements = {}
        # node 0
        measurements['Node_0'] = 7
        measurements['Node_1'] = 7
        # node 1
        measurements['Node_2'] = 80
        # node 2
        measurements['Node_3'] = 5
        measurements['Node_4'] = 5
        measurements['Node_5'] = 10
        # node 3
        measurements['Node_6'] = 8
        measurements['Node_19'] = 10
        # node 4
        measurements['INSTANCE_7'] = 4
        if uuid not in measurements.keys():
            # measurements[uuid] = random.randint(1, 4)
            measurements[uuid] = 8
        return [{'columns': ['avg'],
                 'statistics': [[float(measurements[str(uuid)])]]}]

    @staticmethod
    def get_average_usage_instance_cpu_wb(uuid):
        """The last VM CPU usage values to average

        :param uuid:
        :return:
        """
        measurements = {}
        # node 0
        measurements['INSTANCE_1'] = 80
        measurements['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 50
        # node 1
        measurements['INSTANCE_3'] = 20
        measurements['INSTANCE_4'] = 10
        return [{'columns': ['avg'],
                 'statistics': [[float(measurements[str(uuid)])]]}]

    @staticmethod
    def get_average_usage_instance_cpu(uuid):
        """The last VM CPU usage values to average

        :param uuid:
        :return:
        """
        measurements = {}
        # node 0
        measurements['INSTANCE_0'] = 7
        measurements['INSTANCE_1'] = 7
        # node 1
        measurements['INSTANCE_2'] = 10
        # node 2
        measurements['INSTANCE_3'] = 5
        measurements['INSTANCE_4'] = 5
        measurements['INSTANCE_5'] = 10
        # node 3
        measurements['INSTANCE_6'] = 8
        # node 4
        measurements['INSTANCE_7'] = 4
        if uuid not in measurements.keys():
            # measurements[uuid] = random.randint(1, 4)
            measurements[uuid] = 8
        return [{'columns': ['avg'],
                 'statistics': [[float(measurements[str(uuid)])]]}]

    @staticmethod
    def get_average_usage_instance_memory(uuid):
        measurements = {}
        # node 0
        measurements['INSTANCE_0'] = 2
        measurements['INSTANCE_1'] = 5
        # node 1
        measurements['INSTANCE_2'] = 5
        # node 2
        measurements['INSTANCE_3'] = 8
        measurements['INSTANCE_4'] = 5
        measurements['INSTANCE_5'] = 16
        # node 3
        measurements['INSTANCE_6'] = 8
        # node 4
        measurements['INSTANCE_7'] = 4
        if uuid not in measurements.keys():
            # measurements[uuid] = random.randint(1, 4)
            measurements[uuid] = 10
        return [{'columns': ['avg'],
                 'statistics': [[float(measurements[str(uuid)])]]}]

    @staticmethod
    def get_average_usage_instance_disk(uuid):
        measurements = {}
        # node 0
        measurements['INSTANCE_0'] = 2
        measurements['INSTANCE_1'] = 2
        # node 1
        measurements['INSTANCE_2'] = 2
        # node 2
        measurements['INSTANCE_3'] = 10
        measurements['INSTANCE_4'] = 15
        measurements['INSTANCE_5'] = 20
        # node 3
        measurements['INSTANCE_6'] = 8
        # node 4
        measurements['INSTANCE_7'] = 4
        if uuid not in measurements.keys():
            # measurements[uuid] = random.randint(1, 4)
            measurements[uuid] = 4
        return [{'columns': ['avg'],
                 'statistics': [[float(measurements[str(uuid)])]]}]
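A usage sketch (illustrative) of how this fake stands in for the Monasca
helper, mirroring the patching done in the Basic Consolidation tests below:

    import mock

    fake_metrics = FakeMonascaMetrics()
    # The strategy only ever calls statistic_aggregation(), so a Mock
    # wired to the fake's dispatch method is a sufficient stand-in.
    fake_monasca = mock.Mock(
        statistic_aggregation=fake_metrics.mock_get_statistics)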

View File

@@ -24,35 +24,37 @@ from watcher.decision_engine.strategy import strategies
from watcher import objects
from watcher.tests.db import base
from watcher.tests.db import utils as db_utils
from watcher.tests.decision_engine.model import ceilometer_metrics as fake
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.objects import utils as obj_utils


class SolutionFaker(object):
    @staticmethod
    def build():
        metrics = fake.FakeCeilometerMetrics()
        current_state_cluster = faker_cluster_state.FakerModelCollector()
        strategy = strategies.BasicConsolidation(
            config=mock.Mock(datasource="ceilometer"))
        strategy._compute_model = current_state_cluster.generate_scenario_1()
        strategy.ceilometer = mock.MagicMock(
            get_statistics=metrics.mock_get_statistics)
        return strategy.execute()


class SolutionFakerSingleHyp(object):
    @staticmethod
    def build():
        metrics = fake.FakeCeilometerMetrics()
        current_state_cluster = faker_cluster_state.FakerModelCollector()
        strategy = strategies.BasicConsolidation(
            config=mock.Mock(datasource="ceilometer"))
        strategy._compute_model = (
            current_state_cluster.generate_scenario_3_with_2_nodes())
        strategy.ceilometer = mock.MagicMock(
            get_statistics=metrics.mock_get_statistics)
        return strategy.execute()


class TestActionScheduling(base.DbTestCase):
class TestActionScheduling(base.DbTestCase): class TestActionScheduling(base.DbTestCase):

View File

@@ -26,16 +26,26 @@ from watcher.common import exception
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.model import ceilometer_metrics
from watcher.tests.decision_engine.model import faker_cluster_state
from watcher.tests.decision_engine.model import monasca_metrics


class TestBasicConsolidation(base.TestCase):

    scenarios = [
        ("Ceilometer",
         {"datasource": "ceilometer",
          "fake_datasource_cls": ceilometer_metrics.FakeCeilometerMetrics}),
        ("Monasca",
         {"datasource": "monasca",
          "fake_datasource_cls": monasca_metrics.FakeMonascaMetrics}),
    ]
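Each scenarios entry runs the whole test class once per data source. The
test loader has to expand them with testscenarios; if the test base class
does not already apply them, the usual hook (an assumption here, not shown
in this diff) is:

    import testscenarios

    # Let the unittest load_tests protocol fan the scenarios out into
    # one concrete test case per data source.
    load_tests = testscenarios.load_tests_apply_scenarios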
    def setUp(self):
        super(TestBasicConsolidation, self).setUp()
        # fake metrics
        self.fake_metrics = self.fake_datasource_cls()
        # fake cluster
        self.fake_cluster = faker_cluster_state.FakerModelCollector()
@@ -50,11 +60,11 @@ class TestBasicConsolidation(base.TestCase):
        self.m_model = p_model.start()
        self.addCleanup(p_model.stop)

        p_datasource = mock.patch.object(
            strategies.BasicConsolidation, self.datasource,
            new_callable=mock.PropertyMock)
        self.m_datasource = p_datasource.start()
        self.addCleanup(p_datasource.stop)

        p_audit_scope = mock.patch.object(
            strategies.BasicConsolidation, "audit_scope",
@@ -66,9 +76,10 @@ class TestBasicConsolidation(base.TestCase):
        self.m_audit_scope.return_value = mock.Mock()
        self.m_model.return_value = model_root.ModelRoot()
        self.m_datasource.return_value = mock.Mock(
            statistic_aggregation=self.fake_metrics.mock_get_statistics)
        self.strategy = strategies.BasicConsolidation(
            config=mock.Mock(datasource=self.datasource))

    def test_cluster_size(self):
        size_cluster = len(
@@ -126,7 +137,7 @@ class TestBasicConsolidation(base.TestCase):
        instance_0_score = 0.023333333333333355
        self.assertEqual(
            instance_0_score,
            self.strategy.calculate_score_instance(instance_0))

    def test_basic_consolidation_weight(self):
        model = self.fake_cluster.generate_scenario_1()

View File

@@ -26,8 +26,8 @@ from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.model import ceilometer_metrics
from watcher.tests.decision_engine.model import faker_cluster_state


class TestOutletTempControl(base.TestCase):

@@ -35,7 +35,7 @@ class TestOutletTempControl(base.TestCase):
    def setUp(self):
        super(TestOutletTempControl, self).setUp()
        # fake metrics
        self.fake_metrics = ceilometer_metrics.FakeCeilometerMetrics()
        # fake cluster
        self.fake_cluster = faker_cluster_state.FakerModelCollector()

View File

@@ -26,8 +26,8 @@ from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.model import ceilometer_metrics
from watcher.tests.decision_engine.model import faker_cluster_state


class TestUniformAirflow(base.TestCase):

@@ -35,7 +35,7 @@ class TestUniformAirflow(base.TestCase):
    def setUp(self):
        super(TestUniformAirflow, self).setUp()
        # fake metrics
        self.fake_metrics = ceilometer_metrics.FakeCeilometerMetrics()
        # fake cluster
        self.fake_cluster = faker_cluster_state.FakerModelCollector()

View File

@@ -26,8 +26,8 @@ from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.model import ceilometer_metrics
from watcher.tests.decision_engine.model import faker_cluster_state


class TestWorkloadBalance(base.TestCase):

@@ -35,7 +35,7 @@ class TestWorkloadBalance(base.TestCase):
    def setUp(self):
        super(TestWorkloadBalance, self).setUp()
        # fake metrics
        self.fake_metrics = ceilometer_metrics.FakeCeilometerMetrics()
        # fake cluster
        self.fake_cluster = faker_cluster_state.FakerModelCollector()

View File

@@ -23,8 +23,8 @@ from watcher.common import utils
from watcher.decision_engine.model import model_root
from watcher.decision_engine.strategy import strategies
from watcher.tests import base
from watcher.tests.decision_engine.model import ceilometer_metrics
from watcher.tests.decision_engine.model import faker_cluster_state


class TestWorkloadStabilization(base.TestCase):

@@ -33,7 +33,7 @@ class TestWorkloadStabilization(base.TestCase):
        super(TestWorkloadStabilization, self).setUp()
        # fake metrics
        self.fake_metrics = ceilometer_metrics.FakeCeilometerMetrics()
        # fake cluster
        self.fake_cluster = faker_cluster_state.FakerModelCollector()