diff --git a/cinder/tests/unit/volume/drivers/test_fujitsu_dx.py b/cinder/tests/unit/volume/drivers/test_fujitsu_dx.py index 56d52bd486a..7b5807e46d0 100644 --- a/cinder/tests/unit/volume/drivers/test_fujitsu_dx.py +++ b/cinder/tests/unit/volume/drivers/test_fujitsu_dx.py @@ -131,6 +131,8 @@ MAP_STAT = '0' VOL_STAT = '0' FAKE_CAPACITY = 1170368102400 +FAKE_REMAIN = 1168220618752 +FAKE_PROVISION = 1024 # Volume1 in pool abcd1234_TPP FAKE_LUN_ID1 = '600000E00D2A0000002A011500140000' FAKE_LUN_NO1 = '0x0014' @@ -145,37 +147,38 @@ FAKE_LUN_ID_QOS = '600000E00D2A0000002A011500140000' FAKE_LUN_NO_QOS = '0x0014' FAKE_SYSTEM_NAME = 'ET603SA4621302115' # abcd1234_TPP pool -FAKE_USEGB = 2.0 +FAKE_USEGB = 1 # abcd1234_RG pool -FAKE_USEGB2 = 1.0 +FAKE_USEGB2 = 2 FAKE_POOLS = [{ 'path': {'InstanceID': 'FUJITSU:TPP0004'}, 'pool_name': 'abcd1234_TPP', - 'useable_capacity_gb': (FAKE_CAPACITY / units.Gi) * 20 - FAKE_USEGB, - 'multiattach': False, + 'useable_capacity_gb': int( + (FAKE_CAPACITY / units.Mi * 20 - FAKE_PROVISION) / 1024), + 'multiattach': True, 'thick_provisioning_support': False, 'provisioned_capacity_gb': FAKE_USEGB, - 'total_volumes': 2, 'thin_provisioning_support': True, - 'free_capacity_gb': FAKE_CAPACITY / units.Gi - FAKE_USEGB, - 'total_capacity_gb': FAKE_CAPACITY / units.Gi, + 'free_capacity_gb': int(FAKE_CAPACITY / units.Gi - FAKE_USEGB), + 'total_capacity_gb': int(FAKE_CAPACITY / units.Gi), 'max_over_subscription_ratio': '20.0', }, { 'path': {'InstanceID': 'FUJITSU:RSP0005'}, 'pool_name': 'abcd1234_RG', - 'useable_capacity_gb': FAKE_CAPACITY / units.Gi - FAKE_USEGB2, - 'multiattach': False, + 'useable_capacity_gb': int(FAKE_CAPACITY / units.Gi - FAKE_USEGB2), + 'multiattach': True, 'thick_provisioning_support': True, 'provisioned_capacity_gb': FAKE_USEGB2, - 'total_volumes': 1, + 'total_volumes': 2, 'thin_provisioning_support': False, - 'free_capacity_gb': FAKE_CAPACITY / units.Gi - FAKE_USEGB2, - 'total_capacity_gb': FAKE_CAPACITY / units.Gi, + 
'free_capacity_gb': int((FAKE_REMAIN * 1.0 / units.Mi) / 1024), + 'total_capacity_gb': int(FAKE_CAPACITY / units.Gi), + 'fragment_capacity_mb': FAKE_REMAIN * 1.0 / units.Mi, 'max_over_subscription_ratio': 1, }] FAKE_STATS = { - 'driver_version': '1.4.2', + 'driver_version': '1.4.3', 'storage_protocol': 'iSCSI', 'vendor_name': 'FUJITSU', 'QoS_support': True, @@ -185,7 +188,7 @@ FAKE_STATS = { 'pools': FAKE_POOLS, } FAKE_STATS2 = { - 'driver_version': '1.4.2', + 'driver_version': '1.4.3', 'storage_protocol': 'FC', 'vendor_name': 'FUJITSU', 'QoS_support': True, @@ -756,8 +759,8 @@ class FakeEternusConnection(object): pool['InstanceID'] = 'FUJITSU:RSP0004' pool['CreationClassName'] = 'FUJITSU_RAIDStoragePool' pool['ElementName'] = 'abcd1234_OSVD' - pool['TotalManagedSpace'] = 1170368102400 - pool['RemainingManagedSpace'] = 1170368102400 - 1 * units.Gi + pool['TotalManagedSpace'] = FAKE_CAPACITY + pool['RemainingManagedSpace'] = FAKE_CAPACITY - 1 * units.Gi pool.path = FJ_StoragePool() pool.path['InstanceID'] = 'FUJITSU:RSP0004' pool.path.classname = 'FUJITSU_RAIDStoragePool' @@ -766,8 +769,8 @@ class FakeEternusConnection(object): pool2['InstanceID'] = 'FUJITSU:RSP0005' pool2['CreationClassName'] = 'FUJITSU_RAIDStoragePool' pool2['ElementName'] = 'abcd1234_RG' - pool2['TotalManagedSpace'] = 1170368102400 - pool2['RemainingManagedSpace'] = 1170368102400 - 1 * units.Gi + pool2['TotalManagedSpace'] = FAKE_CAPACITY + pool2['RemainingManagedSpace'] = FAKE_CAPACITY - 2 * units.Gi pool2.path = FJ_StoragePool() pool2.path['InstanceID'] = 'FUJITSU:RSP0005' pool2.path.classname = 'FUJITSU_RAIDStoragePool' @@ -776,8 +779,8 @@ class FakeEternusConnection(object): pool['InstanceID'] = 'FUJITSU:TPP0004' pool['CreationClassName'] = 'FUJITSU_ThinProvisioningPool' pool['ElementName'] = 'abcd1234_TPP' - pool['TotalManagedSpace'] = 1170368102400 - pool['RemainingManagedSpace'] = 1170368102400 - 2 * units.Gi + pool['TotalManagedSpace'] = FAKE_CAPACITY + pool['RemainingManagedSpace'] = 
FAKE_CAPACITY - 1 * units.Gi pool.path = FJ_StoragePool() pool.path['InstanceID'] = 'FUJITSU:TPP0004' pool.path.classname = 'FUJITSU_ThinProvisioningPool' @@ -1023,12 +1026,6 @@ class FJFCDriverTestCase(test.TestCase): '\tFF\t20\tFF\tFFFF\t00' '\t600000E00D2A0000002A011500140000' '\t00\t00\tFF\tFF\tFFFFFFFF\t00' - '\t00\tFF\r\n0001\tFJosv_OgEZj1mSvKRvIKOExKktlg==' - '\tA001\t0B\t00\t0000\tabcd1234_OSVD' - '\t0000000000200000\t00\t00\t00000000' - '\t0050\tFF\t00\tFF\tFF\t20\tFF\tFFFF' - '\t00\t600000E00D2A0000002A0115001E0000' - '\t00\t00\tFF\tFF\tFFFFFFFF\t00' '\t00\tFF' % exec_cmdline) elif exec_cmdline.startswith('show enclosure-status'): ret = ('\r\nCLI> %s\r\n00\r\n' @@ -1282,12 +1279,6 @@ class FJISCSIDriverTestCase(test.TestCase): '\tFF\t20\tFF\tFFFF\t00' '\t600000E00D2A0000002A011500140000' '\t00\t00\tFF\tFF\tFFFFFFFF\t00' - '\t00\tFF\r\n0001\tFJosv_OgEZj1mSvKRvIKOExKktlg==' - '\tA001\t0B\t00\t0000\tabcd1234_OSVD' - '\t0000000000200000\t00\t00\t00000000' - '\t0050\tFF\t00\tFF\tFF\t20\tFF\tFFFF' - '\t00\t600000E00D2A0000002A0115001E0000' - '\t00\t00\tFF\tFF\tFFFFFFFF\t00' '\t00\tFF' % exec_cmdline) elif exec_cmdline.startswith('show enclosure-status'): ret = ('\r\nCLI> %s\r\n00\r\n' @@ -1664,7 +1655,7 @@ class FJCLITestCase(test.TestCase): FAKE_POOL_PROVIOSN_OPTION = self.create_fake_options( pool_name='abcd1234_TPP') - FAKE_PROVISION = {**FAKE_CLI_OUTPUT, 'message': FAKE_USEGB} + FAKE_PROVISION = {**FAKE_CLI_OUTPUT, 'message': 2048.0} proviosn = self.cli._show_pool_provision(**FAKE_POOL_PROVIOSN_OPTION) self.assertEqual(FAKE_PROVISION, proviosn) diff --git a/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_cli.py b/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_cli.py index e4f0c82adb2..840283aa87e 100644 --- a/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_cli.py +++ b/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_cli.py @@ -266,7 +266,7 @@ class FJDXCLI(object): if clidata[0] == 'FFFF': break data += int(clidata[7], 16) - provision = 
data / 2097152 + provision = data / 2048 output['message'] = provision except Exception as ex: diff --git a/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_common.py b/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_common.py index 3acd3b28855..5dc53e63c43 100644 --- a/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_common.py +++ b/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_common.py @@ -68,10 +68,11 @@ class FJDXCommon(object): 1.4.0 - Add support for QoS. 1.4.1 - Add the method for expanding RAID volumes by CLI. 1.4.2 - Add the secondary check for copy-sessions when deleting volumes. + 1.4.3 - Add fragment capacity information of RAID Group. """ - VERSION = "1.4.2" + VERSION = "1.4.3" stats = { 'driver_version': VERSION, 'storage_protocol': None, @@ -273,7 +274,8 @@ class FJDXCommon(object): return element_path, metadata - def create_pool_info(self, pool_instance, volume_count, pool_type): + def create_pool_info(self, pool_instance, volume_count, pool_type, + **kwargs): """Create pool information from pool instance.""" LOG.debug('create_pool_info, pool_instance: %(pool)s, ' 'volume_count: %(volcount)s, pool_type: %(ptype)s.', @@ -285,32 +287,38 @@ class FJDXCommon(object): LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) - total_gb = pool_instance['TotalManagedSpace'] / units.Gi - free_gb = pool_instance['RemainingManagedSpace'] / units.Gi + total_mb = pool_instance['TotalManagedSpace'] * 1.0 / units.Mi + free_mb = pool_instance['RemainingManagedSpace'] * 1.0 / units.Mi + fragment_mb = free_mb - if hasattr(pool_instance, 'provisioned_capacity_gb'): - prov_gb = pool_instance.provisioned_capacity_gb + if kwargs.get('provisioned_capacity_mb'): + prov_mb = kwargs.get('provisioned_capacity_mb') else: - prov_gb = total_gb - free_gb + prov_mb = total_mb - free_mb if pool_type == 'RAID': - useable_gb = free_gb + useable_mb = free_mb + if kwargs.get('fragment_size'): + if kwargs.get('fragment_size') != -1: + fragment_mb = 
kwargs.get('fragment_size') / (2 * 1024) + else: + fragment_mb = useable_mb else: - # If the ratio is less than the value on ETERNUS, - # useable_gb may be negative. Avoid over-allocation. - max_capacity = total_gb * float( + max_capacity_mb = total_mb * float( self.configuration.max_over_subscription_ratio) - useable_gb = max_capacity - prov_gb + useable_mb = max_capacity_mb - prov_mb pool = { 'name': pool_instance['ElementName'], 'path': pool_instance.path, - 'total_capacity_gb': total_gb, - 'free_capacity_gb': free_gb, + 'total_capacity_gb': int(total_mb / 1024), + 'free_capacity_gb': int(free_mb / 1024), 'type': pool_type, 'volume_count': volume_count, - 'provisioned_capacity_gb': prov_gb, - 'useable_capacity_gb': useable_gb + 'provisioned_capacity_gb': int(prov_mb / 1024), + 'useable_capacity_gb': int(useable_mb / 1024), + 'useable_capacity_mb': useable_mb, + 'fragment_capacity_mb': fragment_mb, } LOG.debug('create_pool_info, pool: %s.', pool) @@ -1335,8 +1343,8 @@ class FJDXCommon(object): return poollist def _find_pools(self, poolname_list, conn): - """Find Instance or InstanceName of pool by pool name on ETERNUS.""" - LOG.debug('_find_pool, pool name: %s.', poolname_list) + """Find pool instances by using pool name on ETERNUS.""" + LOG.debug('_find_pools, pool names: %s.', poolname_list) target_poolname = list(poolname_list) pools = [] @@ -1344,40 +1352,23 @@ class FJDXCommon(object): # Get pools info from CIM instance(include info about instance path). poollist = self._find_all_pools_instances(conn) - # One eternus backend has only one special pool name - # so just use pool name can get the target pool. 
for pool, ptype in poollist: poolname = pool['ElementName'] LOG.debug('_find_pools, ' 'pool: %(pool)s, ptype: %(ptype)s.', {'pool': poolname, 'ptype': ptype}) + volume_count = None + provisioned_capacity_mb = None + fragment_size = None if poolname in target_poolname: - try: - volume_list = self._assoc_eternus_names( - pool.path, - conn=conn, - AssocClass='FUJITSU_AllocatedFromStoragePool', - ResultClass='FUJITSU_StorageVolume') - - volume_count = len(volume_list) - except Exception: - msg = (_('_find_pools, ' - 'poolname: %(poolname)s, ' - 'pooltype: %(ptype)s, ' - 'Associator Names, ' - 'cannot connect to ETERNUS.') - % {'ptype': ptype, - 'poolname': poolname}) - LOG.error(msg) - raise exception.VolumeBackendAPIException(data=msg) - if ptype == 'TPP': param_dict = { 'pool-name': poolname } rc, errordesc, data = self._exec_eternus_cli( - 'show_pool_provision', **param_dict) + 'show_pool_provision', + **param_dict) if rc != 0: msg = (_('_find_pools, show_pool_provision, ' @@ -1391,10 +1382,70 @@ class FJDXCommon(object): 'job': data}) LOG.error(msg) raise exception.VolumeBackendAPIException(data=msg) + provisioned_capacity_mb = data + elif ptype == 'RAID': + # Get volume number and fragment capacity information + # only at creation time. 
+ try: + volume_list = self._assoc_eternus_names( + pool.path, + conn=conn, + AssocClass='FUJITSU_AllocatedFromStoragePool', + ResultClass='FUJITSU_StorageVolume') - pool.provisioned_capacity_gb = data + volume_count = len(volume_list) + except Exception: + msg = (_('_find_pools, ' + 'poolname: %(poolname)s, ' + 'pooltype: %(ptype)s, ' + 'Associator Names, ' + 'cannot connect to ETERNUS.') + % {'ptype': ptype, + 'poolname': poolname}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) - poolinfo = self.create_pool_info(pool, volume_count, ptype) + try: + sdpv_list = self._assoc_eternus_names( + pool.path, + conn=conn, + AssocClass='FUJITSU_AllocatedFromStoragePool', + ResultClass='FUJITSU_SDPVPool') + volume_count += len(sdpv_list) + except Exception: + msg = (_('_find_pools, ' + 'pool name: %(poolname)s, ' + 'Associator Names FUJITSU_SDPVPool, ' + 'cannot connect to ETERNUS.') + % {'poolname': poolname}) + LOG.error(msg) + raise exception.VolumeBackendAPIException(data=msg) + + try: + fragment_list = self._assoc_eternus( + pool.path, + conn=conn, + PropertyList=['NumberOfBlocks'], + AssocClass='FUJITSU_AssociatedRemainingExtent', + ResultClass='FUJITSU_FreeExtent') + + if fragment_list: + fragment_size = max( + fragment_list, + key=lambda x: x['NumberOfBlocks']) + else: + fragment_size = {'NumberOfBlocks': 0} + except Exception: + # S2 models do not support this query. + fragment_size = {'NumberOfBlocks': -1} + fragment_size = fragment_size['NumberOfBlocks'] + + poolinfo = self.create_pool_info( + pool, + volume_count, + ptype, + provisioned_capacity_mb=provisioned_capacity_mb, + fragment_size=fragment_size) target_poolname.remove(poolname) pools.append((poolinfo, poolname)) @@ -1404,9 +1455,8 @@ class FJDXCommon(object): if not pools: LOG.warning('_find_pools, all the EternusPools in driver ' - 'configuration file do not exist. 
' - 'Please edit the driver configuration file ' - 'to include EternusPool names.') + 'configuration file are not exist. ' + 'Please edit driver configuration file.') # Sort pools in the order defined in driver configuration file. sorted_pools = ( @@ -1427,12 +1477,14 @@ class FJDXCommon(object): else: thin_enabled = False max_ratio = 1 + single_pool['total_volumes'] = pool['volume_count'] + single_pool['fragment_capacity_mb'] = \ + pool['fragment_capacity_mb'] single_pool.update(dict( path=pool['path'], pool_name=pool['name'], total_capacity_gb=pool['total_capacity_gb'], - total_volumes=pool['volume_count'], free_capacity_gb=pool['free_capacity_gb'], provisioned_capacity_gb=pool['provisioned_capacity_gb'], useable_capacity_gb=pool['useable_capacity_gb'], @@ -1440,7 +1492,7 @@ class FJDXCommon(object): thick_provisioning_support=not thin_enabled, max_over_subscription_ratio=max_ratio, )) - single_pool['multiattach'] = False + single_pool['multiattach'] = True pools_stats['pools'].append(single_pool) self.stats['shared_targets'] = True diff --git a/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_fc.py b/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_fc.py index 68af9f17fe2..2f207c6c6c1 100644 --- a/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_fc.py +++ b/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_fc.py @@ -149,8 +149,10 @@ class FJDXFCDriver(driver.FibreChannelDriver): def get_volume_stats(self, refresh=False): """Get volume stats.""" + LOG.debug('get_volume_stats, refresh: %s, Enter method.', refresh) + pool_name = None - if refresh is True: + if refresh: data, pool_name = self.common.update_volume_stats() backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or 'FJDXFCDriver' @@ -158,7 +160,7 @@ class FJDXFCDriver(driver.FibreChannelDriver): self._stats = data LOG.debug('get_volume_stats, ' - 'pool name: %s.', pool_name) + 'pool name: %s, Exit method.', pool_name) return self._stats 
def extend_volume(self, volume, new_size): diff --git a/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_iscsi.py b/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_iscsi.py index e2dfeb4fa79..82bd1a07c20 100644 --- a/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_iscsi.py +++ b/cinder/volume/drivers/fujitsu/eternus_dx/eternus_dx_iscsi.py @@ -136,8 +136,10 @@ class FJDXISCSIDriver(driver.ISCSIDriver): def get_volume_stats(self, refresh=False): """Get volume stats.""" + LOG.debug('get_volume_stats, refresh: %s, Enter method.', refresh) + pool_name = None - if refresh is True: + if refresh: data, pool_name = self.common.update_volume_stats() backend_name = self.configuration.safe_get('volume_backend_name') data['volume_backend_name'] = backend_name or 'FJDXISCSIDriver' @@ -145,7 +147,7 @@ class FJDXISCSIDriver(driver.ISCSIDriver): self._stats = data LOG.debug('get_volume_stats, ' - 'pool name: %s.', pool_name) + 'pool name: %s, Exit method.', pool_name) return self._stats def extend_volume(self, volume, new_size): diff --git a/releasenotes/notes/fujitsu-pool-infomation-modified-7ebcbbc11a2e6f28.yaml b/releasenotes/notes/fujitsu-pool-infomation-modified-7ebcbbc11a2e6f28.yaml new file mode 100644 index 00000000000..3e7dfd2d966 --- /dev/null +++ b/releasenotes/notes/fujitsu-pool-infomation-modified-7ebcbbc11a2e6f28.yaml @@ -0,0 +1,31 @@ +--- +features: + - | + Fujitsu ETERNUS DX driver: Add fragment capacity information of RAID Group. + + The ETERNUS DX driver has two types of storage pools: RAID Group and + ThinProvisioning Pool. Volumes cannot be created in RAID Groups in + the following situations: + + * The maximum sequential physical free space is smaller than the volumes to + be created. + + * 128 volumes have already been created in the RAID Group. 
+ + For the above reasons, to monitor the maximum sequential physical free + space and total volumes in the RAID Group, when updating pool information + using ``Get Volume Stats``, also update parameter ``total_volumes`` + (volumes already created in the RAID Group) and introduce + ``fragment_capacity_mb`` (maximum sequential physical capacity) to the + backend pool information if the backend pool is a RAID Group. + + Meanwhile, since creating volumes on a ThinProvisioning Pool does not + encounter the above restrictions, the ``fragment_capacity_mb`` parameter + is not added to the information, and the ``total_volumes`` parameter is + removed from the backend pool information when the type of the backend + pool is ThinProvisioning Pool. + + These two parameters can be utilized in future implementations of functions + related to ``filter_function``. + + This patch also enables ``multiattach`` in the driver information.