Improve Compute Data Model

The fields (vcpus, memory and disk_capacity) in the Watcher ComputeNode
do not take the allocation ratios used for overcommit into account, so there
may be a disparity between these values and the used counts reported by Nova.
This patch adds new fields to the compute data model to address this.

Partially Implements: blueprint improve-compute-data-model

Change-Id: Id33496f368fb23cb8e744c7e8451e1cd1397866b
licanwei 2019-07-02 13:54:55 +08:00
parent cd86e85ae8
commit 3d741d05aa
5 changed files with 141 additions and 6 deletions
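As a rough illustration of the intent behind the new fields, free capacity can be derived from the total, the reserved amount, the allocation ratio and current usage. The sketch below mirrors the memory_mb_free/disk_gb_free/vcpus_free properties added in this change; the helper name and the example numbers are made up for illustration and are not part of the patch.

# Hypothetical helper, not part of this change: shows how the new
# reserved/ratio/used fields combine into free capacity, matching the
# memory_mb_free, disk_gb_free and vcpus_free properties added below.
def free_capacity(total, reserved, allocation_ratio, used):
    # Overcommitted capacity is (total - reserved) * allocation_ratio;
    # what remains schedulable is that minus the amount already used.
    return (total - reserved) * allocation_ratio - used

# Example with made-up numbers: 8 pCPUs, none reserved, ratio 16.0,
# 10 vCPUs already allocated -> 118 vCPUs still available.
print(free_capacity(total=8, reserved=0, allocation_ratio=16.0, used=10))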

@@ -72,6 +72,7 @@ os-client-config==1.29.0
os-service-types==1.2.0
os-testr==1.0.0
osc-lib==1.10.0
os-resource-classes==0.4.0
oslo.cache==1.29.0
oslo.concurrency==3.26.0
oslo.config==5.2.0

@@ -10,6 +10,7 @@ jsonschema>=2.6.0 # MIT
keystonemiddleware>=4.21.0 # Apache-2.0
lxml>=4.1.1 # BSD
croniter>=0.3.20 # MIT License
os-resource-classes>=0.4.0
oslo.concurrency>=3.26.0 # Apache-2.0
oslo.cache>=1.29.0 # Apache-2.0
oslo.config>=5.2.0 # Apache-2.0

@@ -13,9 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import os_resource_classes as orc
from oslo_log import log
from watcher.common import nova_helper
from watcher.common import placement_helper
from watcher.decision_engine.model.collector import base
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
@@ -209,6 +211,7 @@ class NovaModelBuilder(base.BaseModelBuilder):
self.no_model_scope_flag = False
self.nova = osc.nova()
self.nova_helper = nova_helper.NovaHelper(osc=self.osc)
self.placement_helper = placement_helper.PlacementHelper(osc=self.osc)
def _collect_aggregates(self, host_aggregates, _nodes):
aggregate_list = self.call_retry(f=self.nova_helper.get_aggregate_list)
@@ -307,15 +310,61 @@ class NovaModelBuilder(base.BaseModelBuilder):
:param node: A node hypervisor instance
:type node: :py:class:`~novaclient.v2.hypervisors.Hypervisor`
"""
inventories = self.placement_helper.get_inventories(node.id)
if inventories:
vcpus = inventories[orc.VCPU].get('total', node.vcpus)
vcpu_reserved = inventories[orc.VCPU].get('reserved', 0)
vcpu_ratio = inventories[orc.VCPU].get('allocation_ratio', 1.0)
memory_mb = inventories[orc.MEMORY_MB].get(
'total', node.memory_mb)
memory_mb_reserved = inventories[orc.MEMORY_MB].get('reserved', 0)
memory_ratio = inventories[orc.MEMORY_MB].get(
'allocation_ratio', 1.0)
disk_capacity = inventories[orc.DISK_GB].get(
'total', node.local_gb)
disk_gb_reserved = inventories[orc.DISK_GB].get('reserved', 0)
disk_ratio = inventories[orc.DISK_GB].get(
'allocation_ratio', 1.0)
else:
vcpus = node.vcpus
vcpu_reserved = 0
vcpu_ratio = 1.0
memory_mb = node.memory_mb
memory_mb_reserved = 0
memory_ratio = 1.0
disk_capacity = node.local_gb
disk_gb_reserved = 0
disk_ratio = 1.0
usages = self.placement_helper.get_usages_for_resource_provider(
node.id)
if usages:
vcpus_used = usages.get(orc.VCPU, node.vcpus_used)
memory_used = usages.get(orc.MEMORY_MB, node.memory_mb_used)
disk_used = usages.get(orc.DISK_GB, node.local_gb_used)
else:
vcpus_used = node.vcpus_used
memory_used = node.memory_mb_used
disk_used = node.local_gb_used
# build up the compute node.
node_attributes = {
"id": node.id,
"uuid": node.service["host"],
"hostname": node.hypervisor_hostname,
"memory": node.memory_mb,
"memory": memory_mb,
"memory_ratio": memory_ratio,
"memory_mb_reserved": memory_mb_reserved,
"memory_mb_used": memory_used,
"disk": node.free_disk_gb,
"disk_capacity": node.local_gb,
"vcpus": node.vcpus,
"disk_capacity": disk_capacity,
"disk_gb_used": disk_used,
"disk_gb_reserved": disk_gb_reserved,
"disk_ratio": disk_ratio,
"vcpus": vcpus,
"vcpu_reserved": vcpu_reserved,
"vcpu_ratio": vcpu_ratio,
"vcpus_used": vcpus_used,
"state": node.state,
"status": node.status,
"disabled_reason": node.service["disabled_reason"]}

@@ -40,14 +40,38 @@ class ComputeNode(compute_resource.ComputeResource):
"disabled_reason": wfields.StringField(nullable=True),
"state": wfields.StringField(default=ServiceState.ONLINE.value),
"memory": wfields.NonNegativeIntegerField(),
"memory_mb_reserved": wfields.NonNegativeIntegerField(),
"memory_mb_used": wfields.NonNegativeIntegerField(),
"disk": wfields.IntegerField(),
"disk_capacity": wfields.NonNegativeIntegerField(),
"disk_gb_reserved": wfields.NonNegativeIntegerField(),
"disk_gb_used": wfields.NonNegativeIntegerField(),
"vcpus": wfields.NonNegativeIntegerField(),
"vcpus_used": wfields.NonNegativeIntegerField(),
"vcpu_reserved": wfields.NonNegativeIntegerField(),
"memory_ratio": wfields.NonNegativeFloatField(),
"vcpu_ratio": wfields.NonNegativeFloatField(),
"disk_ratio": wfields.NonNegativeFloatField(),
}
def accept(self, visitor):
raise NotImplementedError()
@property
def memory_mb_free(self):
total = (self.memory - self.memory_mb_reserved) * self.memory_ratio
return total - self.memory_mb_used
@property
def disk_gb_free(self):
total = (self.disk_capacity - self.disk_gb_reserved) * self.disk_ratio
return total - self.disk_gb_used
@property
def vcpus_free(self):
total = (self.vcpus - self.vcpu_reserved) * self.vcpu_ratio
return total - self.vcpus_used
@base.WatcherObjectRegistry.register_if(False)
class StorageNode(storage_resource.StorageResource):

@@ -17,8 +17,10 @@
# limitations under the License.
import mock
import os_resource_classes as orc
from watcher.common import nova_helper
from watcher.common import placement_helper
from watcher.decision_engine.model.collector import nova
from watcher.tests import base
from watcher.tests import conf_fixture
@@ -31,8 +33,40 @@ class TestNovaClusterDataModelCollector(base.TestCase):
self.useFixture(conf_fixture.ConfReloadFixture())
@mock.patch('keystoneclient.v3.client.Client', mock.Mock())
@mock.patch.object(placement_helper, 'PlacementHelper')
@mock.patch.object(nova_helper, 'NovaHelper')
def test_nova_cdmc_execute(self, m_nova_helper_cls):
def test_nova_cdmc_execute(self, m_nova_helper_cls,
m_placement_helper_cls):
m_placement_helper = mock.Mock(name="placement_helper")
m_placement_helper.get_inventories.return_value = {
orc.VCPU: {
"allocation_ratio": 16.0,
"total": 8,
"reserved": 0,
"step_size": 1,
"min_unit": 1,
"max_unit": 8},
orc.MEMORY_MB: {
"allocation_ratio": 1.5,
"total": 16039,
"reserved": 512,
"step_size": 1,
"min_unit": 1,
"max_unit": 16039},
orc.DISK_GB: {
"allocation_ratio": 1.0,
"total": 142,
"reserved": 0,
"step_size": 1,
"min_unit": 1,
"max_unit": 142}
}
m_placement_helper.get_usages_for_resource_provider.return_value = {
orc.DISK_GB: 10,
orc.MEMORY_MB: 100,
orc.VCPU: 0
}
m_placement_helper_cls.return_value = m_placement_helper
m_nova_helper = mock.Mock(name="nova_helper")
m_nova_helper_cls.return_value = m_nova_helper
m_nova_helper.get_service.return_value = mock.Mock(
@@ -60,9 +94,12 @@ class TestNovaClusterDataModelCollector(base.TestCase):
service={'id': 123, 'host': 'test_hostname',
'disabled_reason': ''},
memory_mb=333,
memory_mb_used=100,
free_disk_gb=222,
local_gb=111,
local_gb_used=10,
vcpus=4,
vcpus_used=0,
servers=None, # Don't let the mock return a value for servers.
**minimal_node
)
@@ -70,9 +107,12 @@ class TestNovaClusterDataModelCollector(base.TestCase):
service={'id': 123, 'host': 'test_hostname',
'disabled_reason': ''},
memory_mb=333,
memory_mb_used=100,
free_disk_gb=222,
local_gb=111,
local_gb_used=10,
vcpus=4,
vcpus_used=0,
**minimal_node_with_servers)
fake_instance = mock.Mock(
id='ef500f7e-dac8-470f-960c-169486fce71b',
@@ -112,6 +152,18 @@ class TestNovaClusterDataModelCollector(base.TestCase):
self.assertEqual(node.uuid, 'test_hostname')
self.assertEqual(instance.uuid, 'ef500f7e-dac8-470f-960c-169486fce71b')
memory_total = node.memory - node.memory_mb_reserved
memory_free = memory_total * node.memory_ratio - node.memory_mb_used
self.assertEqual(node.memory_mb_free, memory_free)
disk_total = node.disk_capacity - node.disk_gb_reserved
disk_free = disk_total * node.disk_ratio - node.disk_gb_used
self.assertEqual(node.disk_gb_free, disk_free)
vcpus_total = node.vcpus - node.vcpu_reserved
vcpus_free = vcpus_total * node.vcpu_ratio - node.vcpus_used
self.assertEqual(node.vcpus_free, vcpus_free)
m_nova_helper.get_compute_node_by_name.assert_called_once_with(
minimal_node['hypervisor_hostname'], servers=True, detailed=True)
m_nova_helper.get_instance_list.assert_called_once_with(
@@ -273,10 +325,15 @@ class TestNovaModelBuilder(base.TestCase):
self.assertEqual(
m_nova.return_value.get_compute_node_by_name.call_count, 2)
@mock.patch.object(placement_helper, 'PlacementHelper')
@mock.patch.object(nova_helper, 'NovaHelper')
def test_add_physical_layer_with_baremetal_node(self, m_nova):
def test_add_physical_layer_with_baremetal_node(self, m_nova,
m_placement_helper):
""""""
mock_placement = mock.Mock(name="placement_helper")
mock_placement.get_inventories.return_value = dict()
mock_placement.get_usages_for_resource_provider.return_value = dict()
m_placement_helper.return_value = mock_placement
m_nova.return_value.get_aggregate_list.return_value = \
[mock.Mock(id=1, name='example'),
mock.Mock(id=5, name='example', hosts=['hostone', 'hosttwo'])]
@@ -292,9 +349,12 @@ class TestNovaModelBuilder(base.TestCase):
state='TEST_STATE',
status='TEST_STATUS',
memory_mb=333,
memory_mb_used=100,
free_disk_gb=222,
local_gb=111,
local_gb_used=10,
vcpus=4,
vcpus_used=0,
servers=[
{'name': 'fake_instance',
'uuid': 'ef500f7e-dac8-470f-960c-169486fce71b'}