From 3d741d05aa6abde7d8332334909944fb710e109d Mon Sep 17 00:00:00 2001
From: licanwei
Date: Tue, 2 Jul 2019 13:54:55 +0800
Subject: [PATCH] Improve Compute Data Model

The fields (vcpus, memory and disk_capacity) in the Watcher ComputeNode
do not take the allocation ratios used for overcommit into account, so
there may be a disparity between these capacity values and the used
counts. This patch adds new fields to solve this problem.

Partially Implements: blueprint improve-compute-data-model
Change-Id: Id33496f368fb23cb8e744c7e8451e1cd1397866b
---
 lower-constraints.txt                          |  1 +
 requirements.txt                               |  1 +
 .../decision_engine/model/collector/nova.py    | 55 +++++++++++++++-
 watcher/decision_engine/model/element/node.py  | 24 +++++++
 .../decision_engine/cluster/test_nova_cdmc.py  | 66 ++++++++++++++++++-
 5 files changed, 141 insertions(+), 6 deletions(-)

diff --git a/lower-constraints.txt b/lower-constraints.txt
index facde9d67..c83c905f5 100644
--- a/lower-constraints.txt
+++ b/lower-constraints.txt
@@ -72,6 +72,7 @@ os-client-config==1.29.0
 os-service-types==1.2.0
 os-testr==1.0.0
 osc-lib==1.10.0
+os-resource-classes==0.4.0
 oslo.cache==1.29.0
 oslo.concurrency==3.26.0
 oslo.config==5.2.0
diff --git a/requirements.txt b/requirements.txt
index 81f58a1c3..e3d530005 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,6 +10,7 @@ jsonschema>=2.6.0 # MIT
 keystonemiddleware>=4.21.0 # Apache-2.0
 lxml>=4.1.1 # BSD
 croniter>=0.3.20 # MIT License
+os-resource-classes>=0.4.0
 oslo.concurrency>=3.26.0 # Apache-2.0
 oslo.cache>=1.29.0 # Apache-2.0
 oslo.config>=5.2.0 # Apache-2.0
diff --git a/watcher/decision_engine/model/collector/nova.py b/watcher/decision_engine/model/collector/nova.py
index 8802f7336..6dec9b105 100644
--- a/watcher/decision_engine/model/collector/nova.py
+++ b/watcher/decision_engine/model/collector/nova.py
@@ -13,9 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import os_resource_classes as orc
 from oslo_log import log
 
 from watcher.common import nova_helper
+from watcher.common import placement_helper
 from watcher.decision_engine.model.collector import base
 from watcher.decision_engine.model import element
 from watcher.decision_engine.model import model_root
@@ -209,6 +211,7 @@ class NovaModelBuilder(base.BaseModelBuilder):
         self.no_model_scope_flag = False
         self.nova = osc.nova()
         self.nova_helper = nova_helper.NovaHelper(osc=self.osc)
+        self.placement_helper = placement_helper.PlacementHelper(osc=self.osc)
 
     def _collect_aggregates(self, host_aggregates, _nodes):
         aggregate_list = self.call_retry(f=self.nova_helper.get_aggregate_list)
@@ -307,15 +310,61 @@ class NovaModelBuilder(base.BaseModelBuilder):
         :param node: A node hypervisor instance
         :type node: :py:class:`~novaclient.v2.hypervisors.Hypervisor`
         """
 
+        inventories = self.placement_helper.get_inventories(node.id)
+        if inventories:
+            vcpus = inventories[orc.VCPU].get('total', node.vcpus)
+            vcpu_reserved = inventories[orc.VCPU].get('reserved', 0)
+            vcpu_ratio = inventories[orc.VCPU].get('allocation_ratio', 1.0)
+            memory_mb = inventories[orc.MEMORY_MB].get(
+                'total', node.memory_mb)
+            memory_mb_reserved = inventories[orc.MEMORY_MB].get('reserved', 0)
+            memory_ratio = inventories[orc.MEMORY_MB].get(
+                'allocation_ratio', 1.0)
+            disk_capacity = inventories[orc.DISK_GB].get(
+                'total', node.local_gb)
+            disk_gb_reserved = inventories[orc.DISK_GB].get('reserved', 0)
+            disk_ratio = inventories[orc.DISK_GB].get(
+                'allocation_ratio', 1.0)
+        else:
+            vcpus = node.vcpus
+            vcpu_reserved = 0
+            vcpu_ratio = 1.0
+            memory_mb = node.memory_mb
+            memory_mb_reserved = 0
+            memory_ratio = 1.0
+            disk_capacity = node.local_gb
+            disk_gb_reserved = 0
+            disk_ratio = 1.0
+
+        usages = self.placement_helper.get_usages_for_resource_provider(
+            node.id)
+        if usages:
+            vcpus_used = usages.get(orc.VCPU, node.vcpus_used)
+            memory_used = usages.get(orc.MEMORY_MB, node.memory_mb_used)
+            disk_used = usages.get(orc.DISK_GB, node.local_gb_used)
+        else:
+            vcpus_used = node.vcpus_used
+            memory_used = node.memory_mb_used
+            disk_used = node.local_gb_used
+        # build up the compute node.
         node_attributes = {
             "id": node.id,
             "uuid": node.service["host"],
             "hostname": node.hypervisor_hostname,
-            "memory": node.memory_mb,
+            "memory": memory_mb,
+            "memory_ratio": memory_ratio,
+            "memory_mb_reserved": memory_mb_reserved,
+            "memory_mb_used": memory_used,
             "disk": node.free_disk_gb,
-            "disk_capacity": node.local_gb,
-            "vcpus": node.vcpus,
+            "disk_capacity": disk_capacity,
+            "disk_gb_used": disk_used,
+            "disk_gb_reserved": disk_gb_reserved,
+            "disk_ratio": disk_ratio,
+            "vcpus": vcpus,
+            "vcpu_reserved": vcpu_reserved,
+            "vcpu_ratio": vcpu_ratio,
+            "vcpus_used": vcpus_used,
             "state": node.state,
             "status": node.status,
             "disabled_reason": node.service["disabled_reason"]}
diff --git a/watcher/decision_engine/model/element/node.py b/watcher/decision_engine/model/element/node.py
index 2911aa486..6960c8200 100644
--- a/watcher/decision_engine/model/element/node.py
+++ b/watcher/decision_engine/model/element/node.py
@@ -40,14 +40,38 @@ class ComputeNode(compute_resource.ComputeResource):
         "disabled_reason": wfields.StringField(nullable=True),
         "state": wfields.StringField(default=ServiceState.ONLINE.value),
         "memory": wfields.NonNegativeIntegerField(),
+        "memory_mb_reserved": wfields.NonNegativeIntegerField(),
+        "memory_mb_used": wfields.NonNegativeIntegerField(),
         "disk": wfields.IntegerField(),
         "disk_capacity": wfields.NonNegativeIntegerField(),
+        "disk_gb_reserved": wfields.NonNegativeIntegerField(),
+        "disk_gb_used": wfields.NonNegativeIntegerField(),
         "vcpus": wfields.NonNegativeIntegerField(),
+        "vcpus_used": wfields.NonNegativeIntegerField(),
+        "vcpu_reserved": wfields.NonNegativeIntegerField(),
+        "memory_ratio": wfields.NonNegativeFloatField(),
+        "vcpu_ratio": wfields.NonNegativeFloatField(),
+        "disk_ratio": wfields.NonNegativeFloatField(),
     }
 
     def accept(self, visitor):
         raise NotImplementedError()
 
+    @property
+    def memory_mb_free(self):
+        total = (self.memory - self.memory_mb_reserved) * self.memory_ratio
+        return total - self.memory_mb_used
+
+    @property
+    def disk_gb_free(self):
+        total = (self.disk_capacity - self.disk_gb_reserved) * self.disk_ratio
+        return total - self.disk_gb_used
+
+    @property
+    def vcpus_free(self):
+        total = (self.vcpus - self.vcpu_reserved) * self.vcpu_ratio
+        return total - self.vcpus_used
+
 
 @base.WatcherObjectRegistry.register_if(False)
 class StorageNode(storage_resource.StorageResource):
diff --git a/watcher/tests/decision_engine/cluster/test_nova_cdmc.py b/watcher/tests/decision_engine/cluster/test_nova_cdmc.py
index c02f93688..364777644 100644
--- a/watcher/tests/decision_engine/cluster/test_nova_cdmc.py
+++ b/watcher/tests/decision_engine/cluster/test_nova_cdmc.py
@@ -17,8 +17,10 @@
 # limitations under the License.
 
 import mock
+import os_resource_classes as orc
 
 from watcher.common import nova_helper
+from watcher.common import placement_helper
 from watcher.decision_engine.model.collector import nova
 from watcher.tests import base
 from watcher.tests import conf_fixture
@@ -31,8 +33,40 @@ class TestNovaClusterDataModelCollector(base.TestCase):
         self.useFixture(conf_fixture.ConfReloadFixture())
 
     @mock.patch('keystoneclient.v3.client.Client', mock.Mock())
+    @mock.patch.object(placement_helper, 'PlacementHelper')
     @mock.patch.object(nova_helper, 'NovaHelper')
-    def test_nova_cdmc_execute(self, m_nova_helper_cls):
+    def test_nova_cdmc_execute(self, m_nova_helper_cls,
+                               m_placement_helper_cls):
+        m_placement_helper = mock.Mock(name="placement_helper")
+        m_placement_helper.get_inventories.return_value = {
+            orc.VCPU: {
+                "allocation_ratio": 16.0,
+                "total": 8,
+                "reserved": 0,
+                "step_size": 1,
+                "min_unit": 1,
+                "max_unit": 8},
+            orc.MEMORY_MB: {
+                "allocation_ratio": 1.5,
+                "total": 16039,
+                "reserved": 512,
+                "step_size": 1,
+                "min_unit": 1,
+                "max_unit": 16039},
+            orc.DISK_GB: {
+                "allocation_ratio": 1.0,
+                "total": 142,
+                "reserved": 0,
+                "step_size": 1,
+                "min_unit": 1,
+                "max_unit": 142}
+        }
+        m_placement_helper.get_usages_for_resource_provider.return_value = {
+            orc.DISK_GB: 10,
+            orc.MEMORY_MB: 100,
+            orc.VCPU: 0
+        }
+        m_placement_helper_cls.return_value = m_placement_helper
         m_nova_helper = mock.Mock(name="nova_helper")
         m_nova_helper_cls.return_value = m_nova_helper
         m_nova_helper.get_service.return_value = mock.Mock(
@@ -60,9 +94,12 @@ class TestNovaClusterDataModelCollector(base.TestCase):
             service={'id': 123, 'host': 'test_hostname',
                      'disabled_reason': ''},
             memory_mb=333,
+            memory_mb_used=100,
             free_disk_gb=222,
             local_gb=111,
+            local_gb_used=10,
             vcpus=4,
+            vcpus_used=0,
             servers=None,  # Don't let the mock return a value for servers.
             **minimal_node
         )
@@ -70,9 +107,12 @@ class TestNovaClusterDataModelCollector(base.TestCase):
             service={'id': 123, 'host': 'test_hostname',
                      'disabled_reason': ''},
             memory_mb=333,
+            memory_mb_used=100,
             free_disk_gb=222,
             local_gb=111,
+            local_gb_used=10,
             vcpus=4,
+            vcpus_used=0,
             **minimal_node_with_servers)
         fake_instance = mock.Mock(
             id='ef500f7e-dac8-470f-960c-169486fce71b',
@@ -112,6 +152,18 @@ class TestNovaClusterDataModelCollector(base.TestCase):
         self.assertEqual(node.uuid, 'test_hostname')
         self.assertEqual(instance.uuid, 'ef500f7e-dac8-470f-960c-169486fce71b')
 
+        memory_total = node.memory - node.memory_mb_reserved
+        memory_free = memory_total * node.memory_ratio - node.memory_mb_used
+        self.assertEqual(node.memory_mb_free, memory_free)
+
+        disk_total = node.disk_capacity - node.disk_gb_reserved
+        disk_free = disk_total * node.disk_ratio - node.disk_gb_used
+        self.assertEqual(node.disk_gb_free, disk_free)
+
+        vcpus_total = node.vcpus - node.vcpu_reserved
+        vcpus_free = vcpus_total * node.vcpu_ratio - node.vcpus_used
+        self.assertEqual(node.vcpus_free, vcpus_free)
+
         m_nova_helper.get_compute_node_by_name.assert_called_once_with(
             minimal_node['hypervisor_hostname'], servers=True, detailed=True)
         m_nova_helper.get_instance_list.assert_called_once_with(
@@ -273,10 +325,15 @@ class TestNovaModelBuilder(base.TestCase):
         self.assertEqual(
             m_nova.return_value.get_compute_node_by_name.call_count, 2)
 
+    @mock.patch.object(placement_helper, 'PlacementHelper')
     @mock.patch.object(nova_helper, 'NovaHelper')
-    def test_add_physical_layer_with_baremetal_node(self, m_nova):
+    def test_add_physical_layer_with_baremetal_node(self, m_nova,
+                                                    m_placement_helper):
         """"""
-
+        mock_placement = mock.Mock(name="placement_helper")
+        mock_placement.get_inventories.return_value = dict()
+        mock_placement.get_usages_for_resource_provider.return_value = dict()
+        m_placement_helper.return_value = mock_placement
         m_nova.return_value.get_aggregate_list.return_value = \
             [mock.Mock(id=1, name='example'),
              mock.Mock(id=5, name='example', hosts=['hostone', 'hosttwo'])]
@@ -292,9 +349,12 @@ class TestNovaModelBuilder(base.TestCase):
             state='TEST_STATE',
             status='TEST_STATUS',
             memory_mb=333,
+            memory_mb_used=100,
             free_disk_gb=222,
             local_gb=111,
+            local_gb_used=10,
             vcpus=4,
+            vcpus_used=0,
             servers=[
                 {'name': 'fake_instance',
                  'uuid': 'ef500f7e-dac8-470f-960c-169486fce71b'}
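
Note (not part of the patch): every new *_free property introduced above follows
the same overcommit-aware formula, free = (total - reserved) * allocation_ratio
- used. Below is a minimal standalone sketch of that math using the inventory
and usage values mocked in the unit test; the free_capacity() helper is
illustrative only and does not exist in Watcher.

    # Illustrative sketch: mirrors the free-capacity math of the new
    # ComputeNode properties (memory_mb_free, disk_gb_free, vcpus_free).
    def free_capacity(total, reserved, allocation_ratio, used):
        # Effective free capacity once reservations and overcommit are
        # taken into account.
        return (total - reserved) * allocation_ratio - used

    # Values taken from the placement inventories/usages mocked in the test:
    vcpus_free = free_capacity(total=8, reserved=0,
                               allocation_ratio=16.0, used=0)
    memory_mb_free = free_capacity(total=16039, reserved=512,
                                   allocation_ratio=1.5, used=100)
    disk_gb_free = free_capacity(total=142, reserved=0,
                                 allocation_ratio=1.0, used=10)

    print(vcpus_free)      # 128.0 -> 8 physical vCPUs overcommitted 16x
    print(memory_mb_free)  # 23190.5
    print(disk_gb_free)    # 132.0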