diff --git a/cinder/tests/unit/volume/drivers/ibm/test_ds8k_proxy.py b/cinder/tests/unit/volume/drivers/ibm/test_ds8k_proxy.py index 7f508dbfbde..0ff06758abc 100644 --- a/cinder/tests/unit/volume/drivers/ibm/test_ds8k_proxy.py +++ b/cinder/tests/unit/volume/drivers/ibm/test_ds8k_proxy.py @@ -1259,7 +1259,8 @@ class DS8KProxyTest(test.TestCase): "consistent_group_snapshot_enabled": True, "group_replication_enabled": True, "consistent_group_replication_enabled": True, - "multiattach": False + "multiattach": False, + "backend_state": 'up' } self.driver._update_stats() @@ -1283,8 +1284,56 @@ class DS8KProxyTest(test.TestCase): with mock.patch.object(helper.DS8KCommonHelper, 'get_pools') as mock_get_pools: mock_get_pools.return_value = [] - self.assertRaises(exception.CinderException, - self.driver._update_stats) + stats = self.driver.get_volume_stats() + self.assertEqual('down', stats['backend_state']) + self.assertEqual('None', stats['extent_pools']) + self.assertEqual(0, stats['total_capacity_gb']) + self.assertEqual(0, stats['free_capacity_gb']) + + @mock.patch.object(helper.DS8KCommonHelper, 'get_pools') + def test_get_volume_status(self, mock_get_pools): + self.configuration.san_clustername = 'P0, P1' + self.driver = FakeDS8KProxy(self.storage_info, self.logger, + self.exception, self) + from collections import OrderedDict + mock_get_pools.side_effect = [OrderedDict([('P0', + {'node': 0, + 'cap': 21474836480, + 'capavail': 21474836480, + 'name': 'pool1', + 'stgtype': 'fb'}), + ('P1', + {'node': 1, + 'cap': 21474836480, + 'capavail': 21474836480, + 'name': 'pool1', + 'stgtype': 'fb'})]), + OrderedDict([('P1', + {'node': 1, + 'cap': 21474836480, + 'capavail': 21474836480, + 'name': 'pool1', + 'stgtype': 'fb'})])] + self.driver.setup(self.ctxt) + expected_result = { + "volume_backend_name": TEST_VOLUME_BACKEND_NAME, + "serial_number": TEST_SOURCE_SYSTEM_UNIT, + "extent_pools": 'P1', + "vendor_name": 'IBM', + "driver_version": 'IBM Storage (v2.0.0)', + 
"storage_protocol": storage.XIV_CONNECTION_TYPE_FC, + "total_capacity_gb": 20, + "free_capacity_gb": 20, + "reserved_percentage": 0, + "consistent_group_snapshot_enabled": True, + "group_replication_enabled": True, + "consistent_group_replication_enabled": True, + "multiattach": False, + "backend_state": 'up' + } + + stats = self.driver.get_volume_stats() + self.assertDictEqual(expected_result, stats) def test_find_pool_should_choose_biggest_pool(self): """create volume should choose biggest pool.""" diff --git a/cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py b/cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py index f995553c067..2f4d11694b6 100644 --- a/cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py +++ b/cinder/volume/drivers/ibm/ibm_storage/ds8k_proxy.py @@ -142,9 +142,10 @@ class Lun(object): 2.1.0 - Added support for specify pool and lss, also improve the code. 2.1.1 - Added support for replication consistency group. 2.1.2 - Added support for cloning volume asynchronously. + 2.3.0 - Added support for reporting backend state. 
""" - VERSION = "2.1.2" + VERSION = "2.3.0" class FakeLun(object): @@ -462,36 +463,50 @@ class DS8KProxy(proxy.IBMStorageProxy): def _update_stats(self): if self._helper: storage_pools = self._helper.get_pools() - if not len(storage_pools): - msg = _('No pools found - make sure san_clustername ' - 'is defined in the config file and that the ' - 'pools exist on the storage.') - LOG.error(msg) - raise exception.CinderException(message=msg) - self._helper.update_storage_pools(storage_pools) else: raise exception.VolumeDriverException( message=(_('Backend %s is not initialized.') % self.configuration.volume_backend_name)) stats = { - "volume_backend_name": self.configuration.volume_backend_name, + "volume_backend_name": + self.configuration.volume_backend_name, "serial_number": self._helper.backend['storage_unit'], - "extent_pools": self._helper.backend['pools_str'], - "vendor_name": 'IBM', - "driver_version": self.full_version, - "storage_protocol": self._helper.get_connection_type(), - "total_capacity_gb": self._b2gb( - sum(p['cap'] for p in storage_pools.values())), - "free_capacity_gb": self._b2gb( - sum(p['capavail'] for p in storage_pools.values())), - "reserved_percentage": self.configuration.reserved_percentage, + "reserved_percentage": + self.configuration.reserved_percentage, "consistent_group_snapshot_enabled": True, "group_replication_enabled": True, "consistent_group_replication_enabled": True, - "multiattach": False + "multiattach": False, + "vendor_name": 'IBM', + "driver_version": self.full_version, + "storage_protocol": self._helper.get_connection_type(), + "extent_pools": 'None', + "total_capacity_gb": 0, + "free_capacity_gb": 0, + "backend_state": 'up' } - + if not len(storage_pools): + msg = _('No pools found - make sure san_clustername ' + 'is defined in the config file and that the ' + 'pools exist on the storage.') + LOG.error(msg) + stats.update({ + "extent_pools": 'None', + "total_capacity_gb": 0, + "free_capacity_gb": 0, + "backend_state": 
'down' +                }) +        else: +            self._helper.update_storage_pools(storage_pools) +            stats.update({ +                "extent_pools": ','.join(p for p in storage_pools.keys()), +                "total_capacity_gb": self._b2gb( +                    sum(p['cap'] for p in storage_pools.values())), +                "free_capacity_gb": self._b2gb( +                    sum(p['capavail'] for p in storage_pools.values())), +                "backend_state": 'up' +            }) if self._replication_enabled: stats['replication_enabled'] = self._replication_enabled diff --git a/releasenotes/notes/ds8k-report-backend-state-in-service-list-f0898950a0f4b122.yaml b/releasenotes/notes/ds8k-report-backend-state-in-service-list-f0898950a0f4b122.yaml new file mode 100644 index 00000000000..4e045c03f67 --- /dev/null +++ b/releasenotes/notes/ds8k-report-backend-state-in-service-list-f0898950a0f4b122.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added the ``backend_state`` capability to the IBM DS8K driver, which reports the backend state (``up``/``down``) in the service list.