Add periods input parameter
This patch set adds a new ``periods`` strategy input parameter that allows specifying the time length of statistic aggregation. Change-Id: Id6c7900e7b909b0b325281c4038e07dc695847a1
This commit is contained in:
parent
120c116655
commit
295c8d914c
@ -92,12 +92,22 @@ parameter type default Value description
|
|||||||
host from list.
|
host from list.
|
||||||
``retry_count`` number 1 Count of random returned
|
``retry_count`` number 1 Count of random returned
|
||||||
hosts.
|
hosts.
|
||||||
|
``periods`` object |periods| These periods are used to get
|
||||||
|
statistic aggregation for
|
||||||
|
instance and host metrics.
|
||||||
|
The period is simply a
|
||||||
|
repeating interval of time
|
||||||
|
into which the samples are
|
||||||
|
grouped for aggregation.
|
||||||
|
Watcher uses only the last
|
||||||
|
period of all received ones.
|
||||||
==================== ====== ===================== =============================
|
==================== ====== ===================== =============================
|
||||||
|
|
||||||
.. |metrics| replace:: ["cpu_util", "memory.resident"]
|
.. |metrics| replace:: ["cpu_util", "memory.resident"]
|
||||||
.. |thresholds| replace:: {"cpu_util": 0.2, "memory.resident": 0.2}
|
.. |thresholds| replace:: {"cpu_util": 0.2, "memory.resident": 0.2}
|
||||||
.. |weights| replace:: {"cpu_util_weight": 1.0, "memory.resident_weight": 1.0}
|
.. |weights| replace:: {"cpu_util_weight": 1.0, "memory.resident_weight": 1.0}
|
||||||
.. |instance_metrics| replace:: {"cpu_util": "compute.node.cpu.percent", "memory.resident": "hardware.memory.used"}
|
.. |instance_metrics| replace:: {"cpu_util": "compute.node.cpu.percent", "memory.resident": "hardware.memory.used"}
|
||||||
|
.. |periods| replace:: {"instance": 720, "node": 600}
|
||||||
|
|
||||||
Efficacy Indicator
|
Efficacy Indicator
|
||||||
------------------
|
------------------
|
||||||
|
@ -80,6 +80,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
|||||||
self.host_choice = None
|
self.host_choice = None
|
||||||
self.instance_metrics = None
|
self.instance_metrics = None
|
||||||
self.retry_count = None
|
self.retry_count = None
|
||||||
|
self.periods = None
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def get_name(cls):
|
def get_name(cls):
|
||||||
@ -138,6 +139,17 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
|||||||
"description": "Count of random returned hosts",
|
"description": "Count of random returned hosts",
|
||||||
"type": "number",
|
"type": "number",
|
||||||
"default": 1
|
"default": 1
|
||||||
|
},
|
||||||
|
"periods": {
|
||||||
|
"description": "These periods are used to get statistic "
|
||||||
|
"aggregation for instance and host "
|
||||||
|
"metrics. The period is simply a repeating"
|
||||||
|
" interval of time into which the samples"
|
||||||
|
" are grouped for aggregation. Watcher "
|
||||||
|
"uses only the last period of all recieved"
|
||||||
|
" ones.",
|
||||||
|
"type": "object",
|
||||||
|
"default": {"instance": 720, "node": 600}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -190,7 +202,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
|||||||
avg_meter = self.ceilometer.statistic_aggregation(
|
avg_meter = self.ceilometer.statistic_aggregation(
|
||||||
resource_id=instance_uuid,
|
resource_id=instance_uuid,
|
||||||
meter_name=meter,
|
meter_name=meter,
|
||||||
period="120",
|
period=self.periods['instance'],
|
||||||
aggregate='min'
|
aggregate='min'
|
||||||
)
|
)
|
||||||
if avg_meter is None:
|
if avg_meter is None:
|
||||||
@ -244,7 +256,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
|||||||
avg_meter = self.ceilometer.statistic_aggregation(
|
avg_meter = self.ceilometer.statistic_aggregation(
|
||||||
resource_id=resource_id,
|
resource_id=resource_id,
|
||||||
meter_name=self.instance_metrics[metric],
|
meter_name=self.instance_metrics[metric],
|
||||||
period="60",
|
period=self.periods['node'],
|
||||||
aggregate='avg'
|
aggregate='avg'
|
||||||
)
|
)
|
||||||
if avg_meter is None:
|
if avg_meter is None:
|
||||||
@ -414,6 +426,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
|
|||||||
self.host_choice = self.input_parameters.host_choice
|
self.host_choice = self.input_parameters.host_choice
|
||||||
self.instance_metrics = self.input_parameters.instance_metrics
|
self.instance_metrics = self.input_parameters.instance_metrics
|
||||||
self.retry_count = self.input_parameters.retry_count
|
self.retry_count = self.input_parameters.retry_count
|
||||||
|
self.periods = self.input_parameters.periods
|
||||||
|
|
||||||
def do_execute(self):
|
def do_execute(self):
|
||||||
migration = self.check_threshold()
|
migration = self.check_threshold()
|
||||||
|
@ -79,7 +79,8 @@ class TestWorkloadStabilization(base.TestCase):
|
|||||||
{"cpu_util": "compute.node.cpu.percent",
|
{"cpu_util": "compute.node.cpu.percent",
|
||||||
"memory.resident": "hardware.memory.used"},
|
"memory.resident": "hardware.memory.used"},
|
||||||
'host_choice': 'retry',
|
'host_choice': 'retry',
|
||||||
'retry_count': 1})
|
'retry_count': 1,
|
||||||
|
'periods': {"instance": 720, "node": 600}})
|
||||||
self.strategy.metrics = ["cpu_util", "memory.resident"]
|
self.strategy.metrics = ["cpu_util", "memory.resident"]
|
||||||
self.strategy.thresholds = {"cpu_util": 0.2, "memory.resident": 0.2}
|
self.strategy.thresholds = {"cpu_util": 0.2, "memory.resident": 0.2}
|
||||||
self.strategy.weights = {"cpu_util_weight": 1.0,
|
self.strategy.weights = {"cpu_util_weight": 1.0,
|
||||||
@ -89,6 +90,7 @@ class TestWorkloadStabilization(base.TestCase):
|
|||||||
"memory.resident": "hardware.memory.used"}
|
"memory.resident": "hardware.memory.used"}
|
||||||
self.strategy.host_choice = 'retry'
|
self.strategy.host_choice = 'retry'
|
||||||
self.strategy.retry_count = 1
|
self.strategy.retry_count = 1
|
||||||
|
self.strategy.periods = {"instance": 720, "node": 600}
|
||||||
|
|
||||||
def test_get_instance_load(self):
|
def test_get_instance_load(self):
|
||||||
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
|
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
|
||||||
@ -98,6 +100,23 @@ class TestWorkloadStabilization(base.TestCase):
|
|||||||
self.assertEqual(
|
self.assertEqual(
|
||||||
instance_0_dict, self.strategy.get_instance_load("INSTANCE_0"))
|
instance_0_dict, self.strategy.get_instance_load("INSTANCE_0"))
|
||||||
|
|
||||||
|
def test_periods(self):
|
||||||
|
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
|
||||||
|
p_ceilometer = mock.patch.object(
|
||||||
|
strategies.WorkloadStabilization, "ceilometer")
|
||||||
|
m_ceilometer = p_ceilometer.start()
|
||||||
|
self.addCleanup(p_ceilometer.stop)
|
||||||
|
m_ceilometer.return_value = mock.Mock(
|
||||||
|
statistic_aggregation=self.fake_metrics.mock_get_statistics)
|
||||||
|
self.strategy.get_instance_load("INSTANCE_0")
|
||||||
|
m_ceilometer.statistic_aggregation.assert_called_with(
|
||||||
|
aggregate='min', meter_name='memory.resident',
|
||||||
|
period=720, resource_id='INSTANCE_0')
|
||||||
|
self.strategy.get_hosts_load()
|
||||||
|
m_ceilometer.statistic_aggregation.assert_called_with(
|
||||||
|
aggregate='avg', meter_name='hardware.memory.used',
|
||||||
|
period=600, resource_id=mock.ANY)
|
||||||
|
|
||||||
def test_normalize_hosts_load(self):
|
def test_normalize_hosts_load(self):
|
||||||
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
|
self.m_model.return_value = self.fake_cluster.generate_scenario_1()
|
||||||
fake_hosts = {'Node_0': {'cpu_util': 0.07, 'memory.resident': 7},
|
fake_hosts = {'Node_0': {'cpu_util': 0.07, 'memory.resident': 7},
|
||||||
|
Loading…
Reference in New Issue
Block a user