IBM DS8K: Report backend state
This patch implements reporting backend state in service list.

Implements: blueprint report-backend-state-in-service-list
Change-Id: I750bb3d2f534734e74e895aa597259ce719e8ff3
parent f412fde615
commit 3e12e2f930
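For orientation before the diff: instead of raising when no storage pools are found, the driver now reports a 'backend_state' of 'up' or 'down' in the stats it returns. Below is a minimal standalone sketch of that reporting pattern; the function name, pool layout, and the bytes-to-GiB conversion are illustrative assumptions, not the driver's actual code.

def report_backend_state(pools):
    # Sketch only: derive 'backend_state' from pool discovery instead of raising.
    # 'pools' stands in for what a get_pools() helper would return.
    stats = {"backend_state": 'down',
             "total_capacity_gb": 0,
             "free_capacity_gb": 0}
    if pools:
        gib = 1024 ** 3  # assuming a bytes-to-GiB conversion similar to _b2gb
        stats.update({
            "backend_state": 'up',
            "total_capacity_gb": sum(p['cap'] for p in pools.values()) // gib,
            "free_capacity_gb": sum(p['capavail'] for p in pools.values()) // gib,
        })
    return stats

print(report_backend_state({}))  # no pools reachable -> 'down', zero capacity
print(report_backend_state({'P1': {'cap': 21474836480,
                                   'capavail': 21474836480}}))  # -> 'up', 20, 20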
@@ -1259,7 +1259,8 @@ class DS8KProxyTest(test.TestCase):
             "consistent_group_snapshot_enabled": True,
             "group_replication_enabled": True,
             "consistent_group_replication_enabled": True,
-            "multiattach": False
+            "multiattach": False,
+            "backend_state": 'up'
         }
 
         self.driver._update_stats()
@@ -1283,8 +1284,56 @@ class DS8KProxyTest(test.TestCase):
         with mock.patch.object(helper.DS8KCommonHelper,
                                'get_pools') as mock_get_pools:
             mock_get_pools.return_value = []
-            self.assertRaises(exception.CinderException,
-                              self.driver._update_stats)
+            stats = self.driver.get_volume_stats()
+            self.assertEqual('down', stats['backend_state'])
+            self.assertEqual('None', stats['extent_pools'])
+            self.assertEqual(0, stats['total_capacity_gb'])
+            self.assertEqual(0, stats['free_capacity_gb'])
+
+    @mock.patch.object(helper.DS8KCommonHelper, 'get_pools')
+    def test_get_volume_status(self, mock_get_pools):
+        self.configuration.san_clustername = 'P0, P1'
+        self.driver = FakeDS8KProxy(self.storage_info, self.logger,
+                                    self.exception, self)
+        from collections import OrderedDict
+        mock_get_pools.side_effect = [OrderedDict([('P0',
+                                                    {'node': 0,
+                                                     'cap': 21474836480,
+                                                     'capavail': 21474836480,
+                                                     'name': 'pool1',
+                                                     'stgtype': 'fb'}),
+                                                   ('P1',
+                                                    {'node': 1,
+                                                     'cap': 21474836480,
+                                                     'capavail': 21474836480,
+                                                     'name': 'pool1',
+                                                     'stgtype': 'fb'})]),
+                                      OrderedDict([('P1',
+                                                    {'node': 1,
+                                                     'cap': 21474836480,
+                                                     'capavail': 21474836480,
+                                                     'name': 'pool1',
+                                                     'stgtype': 'fb'})])]
+        self.driver.setup(self.ctxt)
+        expected_result = {
+            "volume_backend_name": TEST_VOLUME_BACKEND_NAME,
+            "serial_number": TEST_SOURCE_SYSTEM_UNIT,
+            "extent_pools": 'P1',
+            "vendor_name": 'IBM',
+            "driver_version": 'IBM Storage (v2.0.0)',
+            "storage_protocol": storage.XIV_CONNECTION_TYPE_FC,
+            "total_capacity_gb": 20,
+            "free_capacity_gb": 20,
+            "reserved_percentage": 0,
+            "consistent_group_snapshot_enabled": True,
+            "group_replication_enabled": True,
+            "consistent_group_replication_enabled": True,
+            "multiattach": False,
+            "backend_state": 'up'
+        }
+
+        stats = self.driver.get_volume_stats()
+        self.assertDictEqual(expected_result, stats)
+
     def test_find_pool_should_choose_biggest_pool(self):
         """create volume should choose biggest pool."""
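A side note on the new test above: mock's side_effect hands a different pool listing to each successive get_pools() call, which is why the expected extent_pools ends up as 'P1' only after setup has consumed the first listing. A small self-contained illustration (fake_helper is a stand-in object, not the driver's real helper):

from collections import OrderedDict
from unittest import mock

fake_helper = mock.Mock()
fake_helper.get_pools.side_effect = [
    # first listing: both pools visible
    OrderedDict([('P0', {'cap': 21474836480, 'capavail': 21474836480}),
                 ('P1', {'cap': 21474836480, 'capavail': 21474836480})]),
    # second listing: only P1 remains
    OrderedDict([('P1', {'cap': 21474836480, 'capavail': 21474836480})]),
]

print(list(fake_helper.get_pools()))  # first call (driver setup): ['P0', 'P1']
print(list(fake_helper.get_pools()))  # second call (get_volume_stats): ['P1']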
@@ -142,9 +142,10 @@ class Lun(object):
     2.1.0 - Added support for specify pool and lss, also improve the code.
     2.1.1 - Added support for replication consistency group.
     2.1.2 - Added support for cloning volume asynchronously.
+    2.3.0 - Added support for reporting backend state.
     """
 
-VERSION = "2.1.2"
+VERSION = "2.3.0"
 
 class FakeLun(object):
 
@@ -462,36 +463,50 @@ class DS8KProxy(proxy.IBMStorageProxy):
     def _update_stats(self):
         if self._helper:
             storage_pools = self._helper.get_pools()
-            if not len(storage_pools):
-                msg = _('No pools found - make sure san_clustername '
-                        'is defined in the config file and that the '
-                        'pools exist on the storage.')
-                LOG.error(msg)
-                raise exception.CinderException(message=msg)
-            self._helper.update_storage_pools(storage_pools)
         else:
             raise exception.VolumeDriverException(
                 message=(_('Backend %s is not initialized.')
                          % self.configuration.volume_backend_name))
 
         stats = {
-            "volume_backend_name": self.configuration.volume_backend_name,
+            "volume_backend_name":
+                self.configuration.volume_backend_name,
             "serial_number": self._helper.backend['storage_unit'],
-            "extent_pools": self._helper.backend['pools_str'],
-            "vendor_name": 'IBM',
-            "driver_version": self.full_version,
-            "storage_protocol": self._helper.get_connection_type(),
-            "total_capacity_gb": self._b2gb(
-                sum(p['cap'] for p in storage_pools.values())),
-            "free_capacity_gb": self._b2gb(
-                sum(p['capavail'] for p in storage_pools.values())),
-            "reserved_percentage": self.configuration.reserved_percentage,
+            "reserved_percentage":
+                self.configuration.reserved_percentage,
             "consistent_group_snapshot_enabled": True,
             "group_replication_enabled": True,
             "consistent_group_replication_enabled": True,
-            "multiattach": False
+            "multiattach": False,
+            "vendor_name": 'IBM',
+            "driver_version": self.full_version,
+            "storage_protocol": self._helper.get_connection_type(),
+            "extent_pools": 'None',
+            "total_capacity_gb": 0,
+            "free_capacity_gb": 0,
+            "backend_state": 'up'
         }
+        if not len(storage_pools):
+            msg = _('No pools found - make sure san_clustername '
+                    'is defined in the config file and that the '
+                    'pools exist on the storage.')
+            LOG.error(msg)
+            stats.update({
+                "extent_pools": 'None',
+                "total_capacity_gb": 0,
+                "free_capacity_gb": 0,
+                "backend_state": 'down'
+            })
+        else:
+            self._helper.update_storage_pools(storage_pools)
+            stats.update({
+                "extent_pools": ','.join(p for p in storage_pools.keys()),
+                "total_capacity_gb": self._b2gb(
+                    sum(p['cap'] for p in storage_pools.values())),
+                "free_capacity_gb": self._b2gb(
+                    sum(p['capavail'] for p in storage_pools.values())),
+                "backend_state": 'up'
+            })
         if self._replication_enabled:
             stats['replication_enabled'] = self._replication_enabled
 
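The byte values in the new test and the reworked _update_stats line up; a quick standalone check, assuming _b2gb converts bytes to GiB by dividing by 1024**3:

from collections import OrderedDict

# One 20 GiB pool, mirroring the second get_pools() listing in the test.
pools = OrderedDict([('P1', {'cap': 21474836480, 'capavail': 21474836480})])

extent_pools = ','.join(p for p in pools.keys())
total_gb = sum(p['cap'] for p in pools.values()) // 1024 ** 3
free_gb = sum(p['capavail'] for p in pools.values()) // 1024 ** 3

print(extent_pools, total_gb, free_gb)  # P1 20 20 -> matches expected_result above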
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    Added flag 'backend_state' which will give backend state info in service list.