Fujitsu Driver: Improve get volume information

Processing the information separately according to the type of
backend pool.

Change-Id: I647d9ce2e422888d978c27aea418c9a8bc1db43b
This commit is contained in:
inori 2023-11-16 04:46:52 -05:00 committed by xuq.fnstxz
parent 07dd49babe
commit a94e332818
6 changed files with 163 additions and 85 deletions

View File

@ -131,6 +131,8 @@ MAP_STAT = '0'
VOL_STAT = '0' VOL_STAT = '0'
FAKE_CAPACITY = 1170368102400 FAKE_CAPACITY = 1170368102400
FAKE_REMAIN = 1168220618752
FAKE_PROVISION = 1024
# Volume1 in pool abcd1234_TPP # Volume1 in pool abcd1234_TPP
FAKE_LUN_ID1 = '600000E00D2A0000002A011500140000' FAKE_LUN_ID1 = '600000E00D2A0000002A011500140000'
FAKE_LUN_NO1 = '0x0014' FAKE_LUN_NO1 = '0x0014'
@ -145,37 +147,38 @@ FAKE_LUN_ID_QOS = '600000E00D2A0000002A011500140000'
FAKE_LUN_NO_QOS = '0x0014' FAKE_LUN_NO_QOS = '0x0014'
FAKE_SYSTEM_NAME = 'ET603SA4621302115' FAKE_SYSTEM_NAME = 'ET603SA4621302115'
# abcd1234_TPP pool # abcd1234_TPP pool
FAKE_USEGB = 2.0 FAKE_USEGB = 1
# abcd1234_RG pool # abcd1234_RG pool
FAKE_USEGB2 = 1.0 FAKE_USEGB2 = 2
FAKE_POOLS = [{ FAKE_POOLS = [{
'path': {'InstanceID': 'FUJITSU:TPP0004'}, 'path': {'InstanceID': 'FUJITSU:TPP0004'},
'pool_name': 'abcd1234_TPP', 'pool_name': 'abcd1234_TPP',
'useable_capacity_gb': (FAKE_CAPACITY / units.Gi) * 20 - FAKE_USEGB, 'useable_capacity_gb': int(
'multiattach': False, (FAKE_CAPACITY / units.Mi * 20 - FAKE_PROVISION) / 1024),
'multiattach': True,
'thick_provisioning_support': False, 'thick_provisioning_support': False,
'provisioned_capacity_gb': FAKE_USEGB, 'provisioned_capacity_gb': FAKE_USEGB,
'total_volumes': 2,
'thin_provisioning_support': True, 'thin_provisioning_support': True,
'free_capacity_gb': FAKE_CAPACITY / units.Gi - FAKE_USEGB, 'free_capacity_gb': int(FAKE_CAPACITY / units.Gi - FAKE_USEGB),
'total_capacity_gb': FAKE_CAPACITY / units.Gi, 'total_capacity_gb': int(FAKE_CAPACITY / units.Gi),
'max_over_subscription_ratio': '20.0', 'max_over_subscription_ratio': '20.0',
}, { }, {
'path': {'InstanceID': 'FUJITSU:RSP0005'}, 'path': {'InstanceID': 'FUJITSU:RSP0005'},
'pool_name': 'abcd1234_RG', 'pool_name': 'abcd1234_RG',
'useable_capacity_gb': FAKE_CAPACITY / units.Gi - FAKE_USEGB2, 'useable_capacity_gb': int(FAKE_CAPACITY / units.Gi - FAKE_USEGB2),
'multiattach': False, 'multiattach': True,
'thick_provisioning_support': True, 'thick_provisioning_support': True,
'provisioned_capacity_gb': FAKE_USEGB2, 'provisioned_capacity_gb': FAKE_USEGB2,
'total_volumes': 1, 'total_volumes': 2,
'thin_provisioning_support': False, 'thin_provisioning_support': False,
'free_capacity_gb': FAKE_CAPACITY / units.Gi - FAKE_USEGB2, 'free_capacity_gb': int((FAKE_REMAIN * 1.0 / units.Mi) / 1024),
'total_capacity_gb': FAKE_CAPACITY / units.Gi, 'total_capacity_gb': int(FAKE_CAPACITY / units.Gi),
'fragment_capacity_mb': FAKE_REMAIN * 1.0 / units.Mi,
'max_over_subscription_ratio': 1, 'max_over_subscription_ratio': 1,
}] }]
FAKE_STATS = { FAKE_STATS = {
'driver_version': '1.4.2', 'driver_version': '1.4.3',
'storage_protocol': 'iSCSI', 'storage_protocol': 'iSCSI',
'vendor_name': 'FUJITSU', 'vendor_name': 'FUJITSU',
'QoS_support': True, 'QoS_support': True,
@ -185,7 +188,7 @@ FAKE_STATS = {
'pools': FAKE_POOLS, 'pools': FAKE_POOLS,
} }
FAKE_STATS2 = { FAKE_STATS2 = {
'driver_version': '1.4.2', 'driver_version': '1.4.3',
'storage_protocol': 'FC', 'storage_protocol': 'FC',
'vendor_name': 'FUJITSU', 'vendor_name': 'FUJITSU',
'QoS_support': True, 'QoS_support': True,
@ -756,8 +759,8 @@ class FakeEternusConnection(object):
pool['InstanceID'] = 'FUJITSU:RSP0004' pool['InstanceID'] = 'FUJITSU:RSP0004'
pool['CreationClassName'] = 'FUJITSU_RAIDStoragePool' pool['CreationClassName'] = 'FUJITSU_RAIDStoragePool'
pool['ElementName'] = 'abcd1234_OSVD' pool['ElementName'] = 'abcd1234_OSVD'
pool['TotalManagedSpace'] = 1170368102400 pool['TotalManagedSpace'] = FAKE_CAPACITY
pool['RemainingManagedSpace'] = 1170368102400 - 1 * units.Gi pool['RemainingManagedSpace'] = FAKE_CAPACITY - 1 * units.Gi
pool.path = FJ_StoragePool() pool.path = FJ_StoragePool()
pool.path['InstanceID'] = 'FUJITSU:RSP0004' pool.path['InstanceID'] = 'FUJITSU:RSP0004'
pool.path.classname = 'FUJITSU_RAIDStoragePool' pool.path.classname = 'FUJITSU_RAIDStoragePool'
@ -766,8 +769,8 @@ class FakeEternusConnection(object):
pool2['InstanceID'] = 'FUJITSU:RSP0005' pool2['InstanceID'] = 'FUJITSU:RSP0005'
pool2['CreationClassName'] = 'FUJITSU_RAIDStoragePool' pool2['CreationClassName'] = 'FUJITSU_RAIDStoragePool'
pool2['ElementName'] = 'abcd1234_RG' pool2['ElementName'] = 'abcd1234_RG'
pool2['TotalManagedSpace'] = 1170368102400 pool2['TotalManagedSpace'] = FAKE_CAPACITY
pool2['RemainingManagedSpace'] = 1170368102400 - 1 * units.Gi pool2['RemainingManagedSpace'] = FAKE_CAPACITY - 2 * units.Gi
pool2.path = FJ_StoragePool() pool2.path = FJ_StoragePool()
pool2.path['InstanceID'] = 'FUJITSU:RSP0005' pool2.path['InstanceID'] = 'FUJITSU:RSP0005'
pool2.path.classname = 'FUJITSU_RAIDStoragePool' pool2.path.classname = 'FUJITSU_RAIDStoragePool'
@ -776,8 +779,8 @@ class FakeEternusConnection(object):
pool['InstanceID'] = 'FUJITSU:TPP0004' pool['InstanceID'] = 'FUJITSU:TPP0004'
pool['CreationClassName'] = 'FUJITSU_ThinProvisioningPool' pool['CreationClassName'] = 'FUJITSU_ThinProvisioningPool'
pool['ElementName'] = 'abcd1234_TPP' pool['ElementName'] = 'abcd1234_TPP'
pool['TotalManagedSpace'] = 1170368102400 pool['TotalManagedSpace'] = FAKE_CAPACITY
pool['RemainingManagedSpace'] = 1170368102400 - 2 * units.Gi pool['RemainingManagedSpace'] = FAKE_CAPACITY - 1 * units.Gi
pool.path = FJ_StoragePool() pool.path = FJ_StoragePool()
pool.path['InstanceID'] = 'FUJITSU:TPP0004' pool.path['InstanceID'] = 'FUJITSU:TPP0004'
pool.path.classname = 'FUJITSU_ThinProvisioningPool' pool.path.classname = 'FUJITSU_ThinProvisioningPool'
@ -1023,12 +1026,6 @@ class FJFCDriverTestCase(test.TestCase):
'\tFF\t20\tFF\tFFFF\t00' '\tFF\t20\tFF\tFFFF\t00'
'\t600000E00D2A0000002A011500140000' '\t600000E00D2A0000002A011500140000'
'\t00\t00\tFF\tFF\tFFFFFFFF\t00' '\t00\t00\tFF\tFF\tFFFFFFFF\t00'
'\t00\tFF\r\n0001\tFJosv_OgEZj1mSvKRvIKOExKktlg=='
'\tA001\t0B\t00\t0000\tabcd1234_OSVD'
'\t0000000000200000\t00\t00\t00000000'
'\t0050\tFF\t00\tFF\tFF\t20\tFF\tFFFF'
'\t00\t600000E00D2A0000002A0115001E0000'
'\t00\t00\tFF\tFF\tFFFFFFFF\t00'
'\t00\tFF' % exec_cmdline) '\t00\tFF' % exec_cmdline)
elif exec_cmdline.startswith('show enclosure-status'): elif exec_cmdline.startswith('show enclosure-status'):
ret = ('\r\nCLI> %s\r\n00\r\n' ret = ('\r\nCLI> %s\r\n00\r\n'
@ -1282,12 +1279,6 @@ class FJISCSIDriverTestCase(test.TestCase):
'\tFF\t20\tFF\tFFFF\t00' '\tFF\t20\tFF\tFFFF\t00'
'\t600000E00D2A0000002A011500140000' '\t600000E00D2A0000002A011500140000'
'\t00\t00\tFF\tFF\tFFFFFFFF\t00' '\t00\t00\tFF\tFF\tFFFFFFFF\t00'
'\t00\tFF\r\n0001\tFJosv_OgEZj1mSvKRvIKOExKktlg=='
'\tA001\t0B\t00\t0000\tabcd1234_OSVD'
'\t0000000000200000\t00\t00\t00000000'
'\t0050\tFF\t00\tFF\tFF\t20\tFF\tFFFF'
'\t00\t600000E00D2A0000002A0115001E0000'
'\t00\t00\tFF\tFF\tFFFFFFFF\t00'
'\t00\tFF' % exec_cmdline) '\t00\tFF' % exec_cmdline)
elif exec_cmdline.startswith('show enclosure-status'): elif exec_cmdline.startswith('show enclosure-status'):
ret = ('\r\nCLI> %s\r\n00\r\n' ret = ('\r\nCLI> %s\r\n00\r\n'
@ -1664,7 +1655,7 @@ class FJCLITestCase(test.TestCase):
FAKE_POOL_PROVIOSN_OPTION = self.create_fake_options( FAKE_POOL_PROVIOSN_OPTION = self.create_fake_options(
pool_name='abcd1234_TPP') pool_name='abcd1234_TPP')
FAKE_PROVISION = {**FAKE_CLI_OUTPUT, 'message': FAKE_USEGB} FAKE_PROVISION = {**FAKE_CLI_OUTPUT, 'message': 2048.0}
proviosn = self.cli._show_pool_provision(**FAKE_POOL_PROVIOSN_OPTION) proviosn = self.cli._show_pool_provision(**FAKE_POOL_PROVIOSN_OPTION)
self.assertEqual(FAKE_PROVISION, proviosn) self.assertEqual(FAKE_PROVISION, proviosn)

View File

@ -266,7 +266,7 @@ class FJDXCLI(object):
if clidata[0] == 'FFFF': if clidata[0] == 'FFFF':
break break
data += int(clidata[7], 16) data += int(clidata[7], 16)
provision = data / 2097152 provision = data / 2048
output['message'] = provision output['message'] = provision
except Exception as ex: except Exception as ex:

View File

@ -68,10 +68,11 @@ class FJDXCommon(object):
1.4.0 - Add support for QoS. 1.4.0 - Add support for QoS.
1.4.1 - Add the method for expanding RAID volumes by CLI. 1.4.1 - Add the method for expanding RAID volumes by CLI.
1.4.2 - Add the secondary check for copy-sessions when deleting volumes. 1.4.2 - Add the secondary check for copy-sessions when deleting volumes.
1.4.3 - Add fragment capacity information of RAID Group.
""" """
VERSION = "1.4.2" VERSION = "1.4.3"
stats = { stats = {
'driver_version': VERSION, 'driver_version': VERSION,
'storage_protocol': None, 'storage_protocol': None,
@ -273,7 +274,8 @@ class FJDXCommon(object):
return element_path, metadata return element_path, metadata
def create_pool_info(self, pool_instance, volume_count, pool_type): def create_pool_info(self, pool_instance, volume_count, pool_type,
**kwargs):
"""Create pool information from pool instance.""" """Create pool information from pool instance."""
LOG.debug('create_pool_info, pool_instance: %(pool)s, ' LOG.debug('create_pool_info, pool_instance: %(pool)s, '
'volume_count: %(volcount)s, pool_type: %(ptype)s.', 'volume_count: %(volcount)s, pool_type: %(ptype)s.',
@ -285,32 +287,38 @@ class FJDXCommon(object):
LOG.error(msg) LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg) raise exception.VolumeBackendAPIException(data=msg)
total_gb = pool_instance['TotalManagedSpace'] / units.Gi total_mb = pool_instance['TotalManagedSpace'] * 1.0 / units.Mi
free_gb = pool_instance['RemainingManagedSpace'] / units.Gi free_mb = pool_instance['RemainingManagedSpace'] * 1.0 / units.Mi
fragment_mb = free_mb
if hasattr(pool_instance, 'provisioned_capacity_gb'): if kwargs.get('provisioned_capacity_mb'):
prov_gb = pool_instance.provisioned_capacity_gb prov_mb = kwargs.get('provisioned_capacity_mb')
else: else:
prov_gb = total_gb - free_gb prov_mb = total_mb - free_mb
if pool_type == 'RAID': if pool_type == 'RAID':
useable_gb = free_gb useable_mb = free_mb
if kwargs.get('fragment_size'):
if kwargs.get('fragment_size') != -1:
fragment_mb = kwargs.get('fragment_size') / (2 * 1024)
else: else:
# If the ratio is less than the value on ETERNUS, fragment_mb = useable_mb
# useable_gb may be negative. Avoid over-allocation. else:
max_capacity = total_gb * float( max_capacity_mb = total_mb * float(
self.configuration.max_over_subscription_ratio) self.configuration.max_over_subscription_ratio)
useable_gb = max_capacity - prov_gb useable_mb = max_capacity_mb - prov_mb
pool = { pool = {
'name': pool_instance['ElementName'], 'name': pool_instance['ElementName'],
'path': pool_instance.path, 'path': pool_instance.path,
'total_capacity_gb': total_gb, 'total_capacity_gb': int(total_mb / 1024),
'free_capacity_gb': free_gb, 'free_capacity_gb': int(free_mb / 1024),
'type': pool_type, 'type': pool_type,
'volume_count': volume_count, 'volume_count': volume_count,
'provisioned_capacity_gb': prov_gb, 'provisioned_capacity_gb': int(prov_mb / 1024),
'useable_capacity_gb': useable_gb 'useable_capacity_gb': int(useable_mb / 1024),
'useable_capacity_mb': useable_mb,
'fragment_capacity_mb': fragment_mb,
} }
LOG.debug('create_pool_info, pool: %s.', pool) LOG.debug('create_pool_info, pool: %s.', pool)
@ -1335,8 +1343,8 @@ class FJDXCommon(object):
return poollist return poollist
def _find_pools(self, poolname_list, conn): def _find_pools(self, poolname_list, conn):
"""Find Instance or InstanceName of pool by pool name on ETERNUS.""" """Find pool instances by using pool name on ETERNUS."""
LOG.debug('_find_pool, pool name: %s.', poolname_list) LOG.debug('_find_pools, pool names: %s.', poolname_list)
target_poolname = list(poolname_list) target_poolname = list(poolname_list)
pools = [] pools = []
@ -1344,15 +1352,40 @@ class FJDXCommon(object):
# Get pools info from CIM instance(include info about instance path). # Get pools info from CIM instance(include info about instance path).
poollist = self._find_all_pools_instances(conn) poollist = self._find_all_pools_instances(conn)
# One eternus backend has only one special pool name
# so just use pool name can get the target pool.
for pool, ptype in poollist: for pool, ptype in poollist:
poolname = pool['ElementName'] poolname = pool['ElementName']
LOG.debug('_find_pools, ' LOG.debug('_find_pools, '
'pool: %(pool)s, ptype: %(ptype)s.', 'pool: %(pool)s, ptype: %(ptype)s.',
{'pool': poolname, 'ptype': ptype}) {'pool': poolname, 'ptype': ptype})
volume_count = None
provisioned_capacity_mb = None
fragment_size = None
if poolname in target_poolname: if poolname in target_poolname:
if ptype == 'TPP':
param_dict = {
'pool-name': poolname
}
rc, errordesc, data = self._exec_eternus_cli(
'show_pool_provision',
**param_dict)
if rc != 0:
msg = (_('_find_pools, show_pool_provision, '
'pool name: %(pool_name)s, '
'Return code: %(rc)lu, '
'Error: %(errordesc)s, '
'Message: %(job)s.')
% {'pool_name': poolname,
'rc': rc,
'errordesc': errordesc,
'job': data})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
provisioned_capacity_mb = data
elif ptype == 'RAID':
# Get volume number and fragment capacity information
# only at creation time.
try: try:
volume_list = self._assoc_eternus_names( volume_list = self._assoc_eternus_names(
pool.path, pool.path,
@ -1372,29 +1405,47 @@ class FJDXCommon(object):
LOG.error(msg) LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg) raise exception.VolumeBackendAPIException(data=msg)
if ptype == 'TPP': try:
param_dict = { sdpv_list = self._assoc_eternus_names(
'pool-name': poolname pool.path,
} conn=conn,
rc, errordesc, data = self._exec_eternus_cli( AssocClass='FUJITSU_AllocatedFromStoragePool',
'show_pool_provision', **param_dict) ResultClass='FUJITSU_SDPVPool')
volume_count += len(sdpv_list)
if rc != 0: except Exception:
msg = (_('_find_pools, show_pool_provision, ' msg = (_('_find_pools, '
'pool name: %(pool_name)s, ' 'pool name: %(poolname)s, '
'Return code: %(rc)lu, ' 'Associator Names FUJITSU_SDPVPool, '
'Error: %(errordesc)s, ' 'cannot connect to ETERNUS.')
'Message: %(job)s.') % {'poolname': poolname})
% {'pool_name': poolname,
'rc': rc,
'errordesc': errordesc,
'job': data})
LOG.error(msg) LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg) raise exception.VolumeBackendAPIException(data=msg)
pool.provisioned_capacity_gb = data try:
fragment_list = self._assoc_eternus(
pool.path,
conn=conn,
PropertyList=['NumberOfBlocks'],
AssocClass='FUJITSU_AssociatedRemainingExtent',
ResultClass='FUJITSU_FreeExtent')
poolinfo = self.create_pool_info(pool, volume_count, ptype) if fragment_list:
fragment_size = max(
fragment_list,
key=lambda x: x['NumberOfBlocks'])
else:
fragment_size = {'NumberOfBlocks': 0}
except Exception:
# S2 models do not support this query.
fragment_size = {'NumberOfBlocks': -1}
fragment_size = fragment_size['NumberOfBlocks']
poolinfo = self.create_pool_info(
pool,
volume_count,
ptype,
provisioned_capacity_mb=provisioned_capacity_mb,
fragment_size=fragment_size)
target_poolname.remove(poolname) target_poolname.remove(poolname)
pools.append((poolinfo, poolname)) pools.append((poolinfo, poolname))
@ -1404,9 +1455,8 @@ class FJDXCommon(object):
if not pools: if not pools:
LOG.warning('_find_pools, all the EternusPools in driver ' LOG.warning('_find_pools, all the EternusPools in driver '
'configuration file do not exist. ' 'configuration file are not exist. '
'Please edit the driver configuration file ' 'Please edit driver configuration file.')
'to include EternusPool names.')
# Sort pools in the order defined in driver configuration file. # Sort pools in the order defined in driver configuration file.
sorted_pools = ( sorted_pools = (
@ -1427,12 +1477,14 @@ class FJDXCommon(object):
else: else:
thin_enabled = False thin_enabled = False
max_ratio = 1 max_ratio = 1
single_pool['total_volumes'] = pool['volume_count']
single_pool['fragment_capacity_mb'] = \
pool['fragment_capacity_mb']
single_pool.update(dict( single_pool.update(dict(
path=pool['path'], path=pool['path'],
pool_name=pool['name'], pool_name=pool['name'],
total_capacity_gb=pool['total_capacity_gb'], total_capacity_gb=pool['total_capacity_gb'],
total_volumes=pool['volume_count'],
free_capacity_gb=pool['free_capacity_gb'], free_capacity_gb=pool['free_capacity_gb'],
provisioned_capacity_gb=pool['provisioned_capacity_gb'], provisioned_capacity_gb=pool['provisioned_capacity_gb'],
useable_capacity_gb=pool['useable_capacity_gb'], useable_capacity_gb=pool['useable_capacity_gb'],
@ -1440,7 +1492,7 @@ class FJDXCommon(object):
thick_provisioning_support=not thin_enabled, thick_provisioning_support=not thin_enabled,
max_over_subscription_ratio=max_ratio, max_over_subscription_ratio=max_ratio,
)) ))
single_pool['multiattach'] = False single_pool['multiattach'] = True
pools_stats['pools'].append(single_pool) pools_stats['pools'].append(single_pool)
self.stats['shared_targets'] = True self.stats['shared_targets'] = True

View File

@ -149,8 +149,10 @@ class FJDXFCDriver(driver.FibreChannelDriver):
def get_volume_stats(self, refresh=False): def get_volume_stats(self, refresh=False):
"""Get volume stats.""" """Get volume stats."""
LOG.debug('get_volume_stats, refresh: %s, Enter method.', refresh)
pool_name = None pool_name = None
if refresh is True: if refresh:
data, pool_name = self.common.update_volume_stats() data, pool_name = self.common.update_volume_stats()
backend_name = self.configuration.safe_get('volume_backend_name') backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or 'FJDXFCDriver' data['volume_backend_name'] = backend_name or 'FJDXFCDriver'
@ -158,7 +160,7 @@ class FJDXFCDriver(driver.FibreChannelDriver):
self._stats = data self._stats = data
LOG.debug('get_volume_stats, ' LOG.debug('get_volume_stats, '
'pool name: %s.', pool_name) 'pool name: %s, Exit method.', pool_name)
return self._stats return self._stats
def extend_volume(self, volume, new_size): def extend_volume(self, volume, new_size):

View File

@ -136,8 +136,10 @@ class FJDXISCSIDriver(driver.ISCSIDriver):
def get_volume_stats(self, refresh=False): def get_volume_stats(self, refresh=False):
"""Get volume stats.""" """Get volume stats."""
LOG.debug('get_volume_stats, refresh: %s, Enter method.', refresh)
pool_name = None pool_name = None
if refresh is True: if refresh:
data, pool_name = self.common.update_volume_stats() data, pool_name = self.common.update_volume_stats()
backend_name = self.configuration.safe_get('volume_backend_name') backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or 'FJDXISCSIDriver' data['volume_backend_name'] = backend_name or 'FJDXISCSIDriver'
@ -145,7 +147,7 @@ class FJDXISCSIDriver(driver.ISCSIDriver):
self._stats = data self._stats = data
LOG.debug('get_volume_stats, ' LOG.debug('get_volume_stats, '
'pool name: %s.', pool_name) 'pool name: %s, Exit method.', pool_name)
return self._stats return self._stats
def extend_volume(self, volume, new_size): def extend_volume(self, volume, new_size):

View File

@ -0,0 +1,31 @@
---
features:
- |
Fujitsu ETERNUS DX driver: Add fragment capacity information of RAID Group.
    ETERNUS DX driver has two types of storage pools: RAID Group and
    ThinProvisioning Pool. Volumes cannot be created in RAID Groups in
    the following situations:
* The maximum sequential physical free space is smaller than the volumes to
be created.
* 128 volumes have already been created in the RAID Group.
For the above reasons, to monitor the maximum sequential physical free
space and total volumes in the RAID Group, when updating pool information
using ``Get Volume Stats``, also update parameter ``total_volumes``
(volumes already created in the RAID Group) and introduce
``fragment_capacity_mb`` (maximum sequential physical capacity) to the
backend pool information if the backend pool is a RAID Group.
    Meanwhile, since creating volumes on a ThinProvisioning Pool does not
    encounter the above restrictions, the parameter ``fragment_capacity_mb``
    is not added to the information, and the ``total_volumes`` parameter is
    removed from the backend pool information when the type of backend pool
    is ThinProvisioning Pool.
These two parameters can be utilized in future implementations of functions
related to ``filter_function``.
    This patch also enables ``multiattach`` in the driver information.