HPE Nimble: Add replication

This patch adds the group replication feature to the HPE Nimble driver.

Implements: blueprint nimble-replication-support
Change-Id: Ic8dc32762ce1203cf7e1c50f5002d0e754f2a5dc
Author: raghavendrat
Date:   2024-02-19 13:40:38 +00:00
Parent: 7245ec027b
Commit: 5e7f37f316
5 changed files with 563 additions and 11 deletions


@@ -39,7 +39,7 @@ NIMBLE_URLLIB2 = 'cinder.volume.drivers.hpe.nimble.requests'
NIMBLE_RANDOM = 'cinder.volume.drivers.hpe.nimble.random'
NIMBLE_ISCSI_DRIVER = 'cinder.volume.drivers.hpe.nimble.NimbleISCSIDriver'
NIMBLE_FC_DRIVER = 'cinder.volume.drivers.hpe.nimble.NimbleFCDriver'
DRIVER_VERSION = '4.2.0'
DRIVER_VERSION = '4.3.0'
nimble.DEFAULT_SLEEP = 0
FAKE_POSITIVE_LOGIN_RESPONSE_1 = '2c20aad78a220ed1dae21dcd6f9446f5'
@@ -266,10 +266,19 @@ SRC_CONSIS_GROUP_ID = '7d7dfa02-ac6e-48cb-96af-8a0cd3008d47'
FAKE_SRC_GROUP = fake_group.fake_group_obj(
admin_context, id = SRC_CONSIS_GROUP_ID, status = 'available')
REPL_DEVICES = [{
'san_login': 'nimble',
'san_password': 'nimble_pass',
'san_ip': '10.18.108.66',
'schedule_name': 'every-minute',
'downstream_partner': 'nimblevsagroup2',
'period': 1,
'period_unit': 'minutes'}]
def create_configuration(username, password, ip_address,
pool_name=None, subnet_label=None,
thin_provision=True):
thin_provision=True, devices=None):
configuration = mock.Mock()
configuration.san_login = username
configuration.san_password = password
@@ -278,6 +287,7 @@ def create_configuration(username, password, ip_address,
configuration.nimble_pool_name = pool_name
configuration.nimble_subnet_label = subnet_label
configuration.safe_get.return_value = 'NIMBLE'
configuration.replication_device = devices
return configuration
@@ -767,6 +777,44 @@ class NimbleDriverVolumeTestCase(NimbleDriverBaseTestCase):
'display_name': '',
'display_description': ''})
@mock.patch(NIMBLE_URLLIB2)
@mock.patch(NIMBLE_CLIENT)
@mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
mock.Mock(return_value=[]))
@mock.patch.object(volume_types, 'get_volume_type_extra_specs',
mock.Mock(type_id=FAKE_TYPE_ID, return_value={
'nimble:perfpol-name': 'default',
'nimble:encryption': 'yes'}))
@NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
'default', '*', devices=REPL_DEVICES))
def test_create_volume_replicated(self):
self.mock_client_service.get_vol_info.return_value = (
FAKE_GET_VOL_INFO_RESPONSE)
self.mock_client_service.get_netconfig.return_value = (
FAKE_POSITIVE_NETCONFIG_RESPONSE)
self.assertEqual({
'provider_location': '172.18.108.21:3260 iqn.test',
'provider_auth': None,
'replication_status': 'enabled'},
self.driver.create_volume({'name': 'testvolume',
'size': 1,
'volume_type_id': None,
'display_name': '',
'display_description': ''}))
self.mock_client_service.create_vol.assert_called_once_with(
{'name': 'testvolume',
'size': 1,
'volume_type_id': None,
'display_name': '',
'display_description': ''},
'default',
False,
'iSCSI',
False)
@mock.patch(NIMBLE_URLLIB2)
@mock.patch(NIMBLE_CLIENT)
@mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
@@ -844,6 +892,28 @@ class NimbleDriverVolumeTestCase(NimbleDriverBaseTestCase):
self.mock_client_service.assert_has_calls(expected_calls)
@mock.patch(NIMBLE_URLLIB2)
@mock.patch(NIMBLE_CLIENT)
@mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
mock.Mock(return_value=[]))
@NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
'default', '*', devices=REPL_DEVICES))
@mock.patch(NIMBLE_ISCSI_DRIVER + ".is_volume_backup_clone", mock.Mock(
return_value=['', '']))
def test_delete_volume_replicated(self):
self.mock_client_service.online_vol.return_value = (
FAKE_GENERIC_POSITIVE_RESPONSE)
self.mock_client_service.delete_vol.return_value = (
FAKE_GENERIC_POSITIVE_RESPONSE)
self.driver.delete_volume({'name': 'testvolume'})
expected_calls = [mock.call.online_vol(
'testvolume', False),
mock.call.delete_vol('testvolume')]
self.mock_client_service.assert_has_calls(expected_calls)
@mock.patch(NIMBLE_URLLIB2)
@mock.patch(NIMBLE_CLIENT)
@mock.patch.object(obj_volume.VolumeList, 'get_all_by_host',
@@ -1139,7 +1209,10 @@ class NimbleDriverVolumeTestCase(NimbleDriverBaseTestCase):
'QoS_support': False,
'multiattach': True,
'thin_provisioning_support': True,
'consistent_group_snapshot_enabled': True}]}
'consistent_group_snapshot_enabled': True,
'replication_enabled': False,
'consistent_group_replication_enabled':
False}]}
self.assertEqual(
expected_res,
self.driver.get_volume_stats(refresh=True))
@@ -1172,6 +1245,70 @@ class NimbleDriverVolumeTestCase(NimbleDriverBaseTestCase):
]
self.mock_client_service.assert_has_calls(expected_calls)
@mock.patch(NIMBLE_URLLIB2)
@mock.patch(NIMBLE_CLIENT)
@NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
'default', '*', devices=REPL_DEVICES))
def test_enable_replication(self):
ctx = context.get_admin_context()
group = mock.MagicMock()
volumes = [fake_volume.fake_volume_obj(None)]
return_values = self.driver.enable_replication(ctx, group, volumes)
self.mock_client_service.set_schedule_for_volcoll.assert_called_once()
model_update = return_values[0]
self.assertEqual(model_update['replication_status'], 'enabled')
@mock.patch(NIMBLE_URLLIB2)
@mock.patch(NIMBLE_CLIENT)
@NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
'default', '*', devices=REPL_DEVICES))
def test_disable_replication(self):
ctx = context.get_admin_context()
group = mock.MagicMock()
volumes = [fake_volume.fake_volume_obj(None)]
return_values = self.driver.disable_replication(ctx, group, volumes)
self.mock_client_service.delete_schedule.assert_called_once()
model_update = return_values[0]
self.assertEqual(model_update['replication_status'], 'disabled')
@mock.patch(NIMBLE_URLLIB2)
@mock.patch(NIMBLE_CLIENT)
@NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
'default', '*', devices=REPL_DEVICES))
def test_time_to_secs(self):
time_secs = [('01:05', 3900), ('01:02:15am', 3735),
('03:07:20pm', 54440)]
for time, seconds in time_secs:
ret_secs = self.driver._time_to_secs(time)
self.assertEqual(ret_secs, seconds)
@mock.patch(NIMBLE_URLLIB2)
@mock.patch(NIMBLE_CLIENT)
@NimbleDriverBaseTestCase.client_mock_decorator(create_configuration(
NIMBLE_SAN_LOGIN, NIMBLE_SAN_PASS, NIMBLE_MANAGEMENT_IP,
'default', '*', devices=REPL_DEVICES))
def test_failover_replication(self):
ctx = context.get_admin_context()
group = mock.MagicMock()
volumes = [fake_volume.fake_volume_obj(None)]
return_values = self.driver.failover_replication(
ctx, group, volumes, 'secondary')
self.mock_client_service.handover.assert_called()
group_update = return_values[0]
self.assertEqual(group_update['replication_status'], 'failed-over')
return_values = self.driver.failover_replication(
ctx, group, volumes, 'default')
self.mock_client_service.handover.assert_called()
group_update = return_values[0]
self.assertEqual(group_update['replication_status'], 'enabled')
class NimbleDriverSnapshotTestCase(NimbleDriverBaseTestCase):


@@ -48,7 +48,7 @@ from cinder.volume import volume_utils
from cinder.zonemanager import utils as fczm_utils
DRIVER_VERSION = "4.2.0"
DRIVER_VERSION = "4.3.0"
AES_256_XTS_CIPHER = 'aes_256_xts'
DEFAULT_CIPHER = 'none'
EXTRA_SPEC_ENCRYPTION = 'nimble:encryption'
@@ -134,6 +134,7 @@ class NimbleBaseVolumeDriver(san.SanDriver):
Added consistency groups support
4.2.0 - The Nimble driver is now located in the
cinder.volume.drivers.hpe module.
4.3.0 - Added group replication support
"""
VERSION = DRIVER_VERSION
@@ -151,6 +152,9 @@ class NimbleBaseVolumeDriver(san.SanDriver):
self.verify = False
if self.configuration.nimble_verify_certificate is True:
self.verify = self.configuration.nimble_verify_cert_path or True
self.APIExecutor_remote_array = None
self.remote_array = {}
self._replicated_type = False
@staticmethod
def get_driver_options():
@@ -167,6 +171,7 @@ class NimbleBaseVolumeDriver(san.SanDriver):
def create_volume(self, volume):
"""Create a new volume."""
reserve = not self.configuration.san_thin_provision
LOG.debug("Creating volume: %(name)s", {'name': volume['name']})
self.APIExecutor.create_vol(
volume,
self.configuration.nimble_pool_name, reserve,
@@ -174,15 +179,24 @@ class NimbleBaseVolumeDriver(san.SanDriver):
self._group_target_enabled)
volume_type = volume.get('volume_type')
consis_group_snap_type = False
LOG.debug("volume_type: %(vol_type)s", {'vol_type': volume_type})
if volume_type is not None:
consis_group_snap_type = self.is_volume_group_snap_type(
volume_type)
LOG.debug("consis_group_snap_type: %(cg_type)s",
{'cg_type': consis_group_snap_type})
cg_id = volume.get('group_id', None)
LOG.debug("cg_id: %(cg_id)s", {'cg_id': cg_id})
if consis_group_snap_type and cg_id:
volume_id = self.APIExecutor.get_volume_id_by_name(volume['name'])
cg_volcoll_id = self.APIExecutor.get_volcoll_id_by_name(cg_id)
self.APIExecutor.associate_volcoll(volume_id, cg_volcoll_id)
return self._get_model_info(volume['name'])
model_info = self._get_model_info(volume['name'])
if self._replicated_type:
model_info['replication_status'] = 'enabled'
return model_info
def is_volume_backup_clone(self, volume):
"""check if the volume is created through cinder-backup workflow.
@@ -231,7 +245,23 @@ class NimbleBaseVolumeDriver(san.SanDriver):
"""Delete the specified volume."""
backup_snap_name, backup_vol_name = self.is_volume_backup_clone(volume)
eventlet.sleep(DEFAULT_SLEEP)
if self._replicated_type:
group_id = self.APIExecutor_remote_array.get_group_id()
LOG.debug("group_id: %(id)s", {'id': group_id})
volume_id = self.APIExecutor_remote_array.get_volume_id_by_name(
volume['name'])
LOG.debug("volume_id: %(id)s", {'id': volume_id})
LOG.debug("claim vol on remote array")
self.APIExecutor_remote_array.claim_vol(volume_id, group_id)
LOG.debug("delete vol on remote array")
self.APIExecutor_remote_array.delete_vol(volume['name'])
# mark the volume offline before deletion
self.APIExecutor.online_vol(volume['name'], False)
LOG.debug("Deleting volume %(vol)s", {'vol': volume['name']})
@utils.retry(NimbleAPIException, retries=3)
@@ -403,7 +433,9 @@ class NimbleBaseVolumeDriver(san.SanDriver):
QoS_support=False,
multiattach=True,
thin_provisioning_support=True,
consistent_group_snapshot_enabled=True)
consistent_group_snapshot_enabled=True,
consistent_group_replication_enabled=self._replicated_type,
replication_enabled=self._replicated_type)
self.group_stats['pools'] = [single_pool]
return self.group_stats
@@ -577,10 +609,38 @@ class NimbleBaseVolumeDriver(san.SanDriver):
# offline the volume
self.APIExecutor.online_vol(vol_name, False)
def _do_replication_setup(self, array_id=None):
devices = self.configuration.replication_device
if devices:
dev = devices[0]
remote_array = dict(dev.items())
remote_array['san_login'] = (
dev.get('san_login', self.configuration.san_login))
remote_array['san_password'] = (
dev.get('san_password', self.configuration.san_password))
try:
self.APIExecutor_remote_array = NimbleRestAPIExecutor(
username=remote_array['san_login'],
password=remote_array['san_password'],
ip=remote_array['san_ip'],
verify=self.verify)
LOG.debug("created APIExecutor for remote ip: %(ip)s",
{'ip': remote_array['san_ip']})
except Exception:
LOG.error('Failed to create REST client.'
' Check san_ip, username, password'
' and make sure the array version is compatible')
raise
self._replicated_type = True
self.remote_array = remote_array
def do_setup(self, context):
"""Setup the Nimble Cinder volume driver."""
self._check_config()
# Setup API Executor
san_ip = self.configuration.san_ip
LOG.debug("san_ip: %(ip)s", {'ip': san_ip})
try:
self.APIExecutor = NimbleRestAPIExecutor(
username=self.configuration.san_login,
@@ -596,6 +656,11 @@ class NimbleBaseVolumeDriver(san.SanDriver):
' and make sure the array version is compatible')
raise
self._update_existing_vols_agent_type(context)
self._do_replication_setup()
if self._replicated_type:
LOG.debug("for %(ip)s, schedule_name is: %(name)s",
{'ip': san_ip,
'name': self.remote_array['schedule_name']})
def _update_existing_vols_agent_type(self, context):
backend_name = self.configuration.safe_get('volume_backend_name')
@@ -788,7 +853,7 @@ class NimbleBaseVolumeDriver(san.SanDriver):
cg_type = False
cg_name = group.id
description = group.description if group.description else group.name
LOG.info('Create group: %(name)s, description)s', {'name': cg_name,
LOG.info('Create group: %(name)s, %(description)s', {'name': cg_name,
'description': description})
for volume_type in group.volume_types:
if volume_type:
@@ -804,7 +869,7 @@ class NimbleBaseVolumeDriver(san.SanDriver):
'="<is> True"')
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
self.APIExecutor.create_volcoll(cg_name)
self.APIExecutor.create_volcoll(cg_name, description)
return {'status': fields.GroupStatus.AVAILABLE}
def delete_group(self, context, group, volumes):
@@ -945,6 +1010,168 @@ class NimbleBaseVolumeDriver(san.SanDriver):
raise NimbleAPIException(msg)
return None, None
def _time_to_secs(self, time):
# time is specified as 'HH:MM' or 'HH:MM:SS', either
# qualified with am or pm, or in 24-hour clock
time = time.strip("'")
arr = time.split(':')
(hours, minutes) = (arr[0], arr[1])
total_secs = 0
if len(arr) == 2:
hours = int(hours)
if minutes.endswith('pm'):
# for time like 12:01pm, no need to add 12 to hours
if hours != 12:
# for other time like 01:05pm, we have to add 12 to hours
hours += 12
minutes = minutes.strip('pm')
if minutes.endswith('am'):
minutes = minutes.strip('am')
minutes = int(minutes)
total_secs = hours * 3600 + minutes * 60
return total_secs
if len(arr) == 3:
seconds = arr[2]
hours = int(hours)
minutes = int(minutes)
if seconds.endswith('pm'):
# for time like 12:01:01pm, no need to add 12 to hours
if hours != 12:
# for other time like 01:05:05pm, we have to add 12 to hours
hours += 12
seconds = seconds.strip('pm')
if seconds.endswith('am'):
seconds = seconds.strip('am')
seconds = int(seconds)
total_secs = hours * 3600 + minutes * 60 + seconds
return total_secs
def enable_replication(self, context, group, volumes):
LOG.debug("try to enable repl on group %(group)s", {'group': group.id})
if not group.is_replicated:
raise NotImplementedError()
model_update = {}
try:
# If replication is enabled for volume type, apply the schedule
nimble_group_name = group.id
san_ip = self.configuration.san_ip
# apply schedule
sched_name = self.remote_array['schedule_name']
partner_name = self.remote_array['downstream_partner']
LOG.debug("for %(ip)s, schedule_name is: %(name)s",
{'ip': san_ip, 'name': sched_name})
kwargs = {}
optionals = ['period', 'period_unit', 'num_retain',
'num_retain_replica', 'at_time', 'until_time',
'days', 'replicate_every', 'alert_threshold']
for key in optionals:
if key in self.remote_array:
value = self.remote_array[key]
kwargs[key] = value
if key == 'at_time' or key == 'until_time':
seconds = self._time_to_secs(value)
kwargs[key] = seconds
self.APIExecutor.set_schedule_for_volcoll(
sched_name, nimble_group_name, partner_name, **kwargs)
model_update.update({
'replication_status': fields.ReplicationStatus.ENABLED})
except Exception as e:
model_update.update({
'replication_status': fields.ReplicationStatus.ERROR})
LOG.error("Error enabling replication on group %(group)s. "
"Exception received: %(e)s.",
{'group': group.id, 'e': e})
return model_update, None
def disable_replication(self, context, group, volumes):
LOG.debug("try disable repl on group %(group)s", {'group': group.id})
if not group.is_replicated:
raise NotImplementedError()
model_update = {}
try:
san_ip = self.configuration.san_ip
sched_name = self.remote_array['schedule_name']
LOG.debug("for %(ip)s, schedule_name is: %(name)s",
{'ip': san_ip, 'name': sched_name})
data = self.APIExecutor.get_volcoll_details(group.id)
LOG.debug("data: %(data)s", {'data': data})
sched_id = data['schedule_list'][0]['id']
self.APIExecutor.delete_schedule(sched_id)
model_update.update({
'replication_status': fields.ReplicationStatus.DISABLED})
except Exception as e:
model_update.update({
'replication_status': fields.ReplicationStatus.ERROR})
LOG.error("Error disabling replication on group %(group)s. "
"Exception received: %(e)s.",
{'group': group.id, 'e': e})
return model_update, None
def failover_replication(self, context, group, volumes,
secondary_backend_id=None):
LOG.debug("try to failover/failback group %(group)s to %(backend)s",
{'group': group.id, 'backend': secondary_backend_id})
group_update = {}
volume_update_list = []
partner_name = secondary_backend_id
partner_id = None
if partner_name != 'default':
LOG.debug("failover to secondary array")
partner_id = self.APIExecutor.get_partner_id_by_name(partner_name)
LOG.debug("partner_id %(id)s", {'id': partner_id})
volcoll_id = self.APIExecutor.get_volcoll_id_by_name(group.id)
LOG.debug("volcoll_id %(id)s", {'id': volcoll_id})
self.APIExecutor.handover(volcoll_id, partner_id)
rep_status = fields.ReplicationStatus.FAILED_OVER
if partner_name == 'default':
LOG.debug("failback to primary array")
data = self.APIExecutor_remote_array.get_volcoll_details(group.id)
partner_name = data['replication_partner']
LOG.debug("partner_name: %(name)s", {'name': partner_name})
partner_id = self.APIExecutor_remote_array.get_partner_id_by_name(
partner_name)
LOG.debug("partner_id %(id)s", {'id': partner_id})
volcoll_id = self.APIExecutor_remote_array.get_volcoll_id_by_name(
group.id)
LOG.debug("volcoll_id %(id)s", {'id': volcoll_id})
self.APIExecutor_remote_array.handover(volcoll_id, partner_id)
rep_status = fields.ReplicationStatus.ENABLED
group_update['replication_status'] = rep_status
for vol in volumes:
volume_update = {
'id': vol.id,
'replication_status': rep_status}
volume_update_list.append(volume_update)
return group_update, volume_update_list
@interface.volumedriver
class NimbleISCSIDriver(NimbleBaseVolumeDriver, san.SanISCSIDriver):
@@ -1827,6 +2054,15 @@ class NimbleRestAPIExecutor(object):
.format(volcoll_name))
return r.json()['data'][0]['id']
def get_volcoll_details(self, volcoll_name):
api = "volume_collections/detail"
filter = {"name": volcoll_name}
r = self.get_query(api, filter)
if not r.json()['data']:
raise Exception("Unable to retrieve information for volcoll: {0}"
.format(volcoll_name))
return r.json()['data'][0]
def get_snapcoll_id_by_name(self, snapcoll_name):
api = "snapshot_collections"
filter = {"name": snapcoll_name}
@@ -1836,9 +2072,9 @@ class NimbleRestAPIExecutor(object):
.format(snapcoll_name))
return r.json()['data'][0]['id']
def create_volcoll(self, volcoll_name):
def create_volcoll(self, volcoll_name, description=''):
api = "volume_collections"
data = {"data": {"name": volcoll_name}}
data = {"data": {"name": volcoll_name, "description": description}}
r = self.post(api, data)
return r['data']
@@ -2186,3 +2422,74 @@ class NimbleRestAPIExecutor(object):
api = "groups/" + str(group_id)
data = {'data': {'group_target_enabled': True}}
self.put(api, data)
def set_schedule_for_volcoll(self, sched_name, volcoll_name,
repl_partner,
period=1,
period_unit='days',
num_retain=10,
num_retain_replica=1,
at_time=0, # 00:00
until_time=86340, # 23:59
days='all',
replicate_every=1,
alert_threshold='24:00'):
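# at_time and until_time are seconds since midnight (at_time=0 is
# 00:00, until_time=86340 is 23:59); enable_replication converts
# clock strings with _time_to_secs before calling this method.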
volcoll_id = self.get_volcoll_id_by_name(volcoll_name)
api = "protection_schedules"
sched_details = {'name': sched_name,
'volcoll_or_prottmpl_type': "volume_collection",
'volcoll_or_prottmpl_id': volcoll_id,
'downstream_partner': repl_partner,
'period': period,
'period_unit': period_unit,
'num_retain': num_retain,
'num_retain_replica': num_retain_replica}
if at_time != 0:
sched_details['at_time'] = at_time
if until_time != 86340:
sched_details['until_time'] = until_time
if days != 'all':
sched_details['days'] = days
if replicate_every != 1:
sched_details['replicate_every'] = replicate_every
if alert_threshold != '24:00':
sched_details['alert_threshold'] = alert_threshold
data = {'data': sched_details}
r = self.post(api, data)
return r['data']
def delete_schedule(self, sched_id):
api = "protection_schedules/" + str(sched_id)
self.delete(api)
def claim_vol(self, volume_id, group_id):
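# take ownership of the volume by updating owned_by_group_id;
# delete_volume uses this to claim the replica on the downstream
# array before removing it there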
api = "volumes/" + str(volume_id)
group_id = str(group_id)
data = {'data': {"owned_by_group_id": group_id
}
}
r = self.put(api, data)
return r
def get_partner_id_by_name(self, partner_name):
api = "replication_partners"
filter = {"name": partner_name}
r = self.get_query(api, filter)
if not r.json()['data']:
raise Exception("Unable to retrieve information for partner: {0}"
.format(partner_name))
return r.json()['data'][0]['id']
def handover(self, volcoll_id, partner_id):
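# transfer ownership of the volume collection to the given
# replication partner; failover_replication uses this for both
# failover and failback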
volcoll_id = str(volcoll_id)
partner_id = str(partner_id)
api = "volume_collections/" + volcoll_id + "/actions/handover"
data = {'data': {"id": volcoll_id,
"replication_partner_id": partner_id
}
}
self.post(api, data)


@@ -44,6 +44,7 @@ Supported operations
* Volume Revert to Snapshot
* Create, list, update, and delete consistency groups
* Create, list, and delete consistency group snapshots
* Consistency group replication
Nimble and Alletra 6k Storage driver configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -264,3 +265,105 @@ inspect the instance devices:
.. code-block:: console

   # virsh dumpxml <Instance ID | Instance Name | Instance UUID>

Consistency group replication
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

To enable consistency group replication, follow the steps below:

1. Add `replication_device` to the storage backend settings in `cinder.conf`,
   then restart the Cinder Volume service.

   Example of `cinder.conf` for volume replication:

   .. code-block:: ini

      [nimble]
      san_ip = xxx.xxx.xxx.xxx
      ...
      replication_device = backend_id:nimblevsagroup2,
                           san_ip:10.132.239.66,
                           san_login:admin,
                           san_password:admin,
                           schedule_name:sched-one,
                           downstream_partner:nimblevsagroup2,
                           period:15,
                           period_unit:minutes

   - Only one `replication_device` can be configured for each primary
     backend.
   - The keys `backend_id`, `san_ip`, `san_login`, `san_password`,
     `schedule_name` and `downstream_partner` are mandatory.
   - The remaining parameters are optional; if they are not given, the
     following default values are used:

     .. code-block:: ini

        period:1
        period_unit:days
        num_retain:10
        num_retain_replica:1
        at_time:'00:00'
        until_time:'23:59'
        days:'all'
        replicate_every:1
        alert_threshold:'24:00'
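
   For example, a `replication_device` that overrides some of the optional
   parameters might look like this (the values below are illustrative only;
   the schedule name and partner must exist on the arrays):

   .. code-block:: ini

      replication_device = backend_id:nimblevsagroup2,
                           san_ip:10.132.239.66,
                           san_login:admin,
                           san_password:admin,
                           schedule_name:sched-half-hour,
                           downstream_partner:nimblevsagroup2,
                           period:30,
                           period_unit:minutes,
                           num_retain:20,
                           num_retain_replica:2,
                           at_time:'06:00:00am',
                           until_time:'06:00:00pm'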

2. Create a volume type with the properties `replication_enabled='<is> True'`
   and `consistent_group_snapshot_enabled='<is> True'`.

   .. code-block:: console

      $ cinder type-create nimble
      $ cinder type-key nimble set volume_backend_name='nimble'
      $ cinder type-key nimble set replication_enabled='<is> True'
      $ cinder type-key nimble set consistent_group_snapshot_enabled='<is> True'

3. Create a consistency group type with the properties
   `consistent_group_snapshot_enabled='<is> True'` and
   `consistent_group_replication_enabled='<is> True'`.

   .. code-block:: console

      $ cinder --os-volume-api-version 3.38 group-type-create repl_type
      $ cinder --os-volume-api-version 3.38 group-type-key repl_type set
        consistent_group_snapshot_enabled='<is> True' consistent_group_replication_enabled='<is> True'

4. Create a consistency group that uses the group type and the
   replication-enabled volume type created above.

   .. code-block:: console

      $ cinder --os-volume-api-version 3.38 group-create --name grp_1 repl_type nimble

5. Create a volume in the consistency group.

   .. code-block:: console

      $ cinder --os-volume-api-version 3.38 create --volume-type nimble --group-id {grp_1-id}
        --name {volume-name} {size}

6. Enable consistency group replication.

   .. code-block:: console

      $ cinder --os-volume-api-version 3.38 group-enable-replication grp_1

7. Disable consistency group replication.

   .. code-block:: console

      $ cinder --os-volume-api-version 3.38 group-disable-replication grp_1

8. Fail over consistency group replication to the secondary backend.

   .. code-block:: console

      $ cinder --os-volume-api-version 3.38 group-failover-replication
        --secondary-backend-id nimblevsagroup2 grp_1

9. Fail back consistency group replication to the default (primary) backend.

   .. code-block:: console

      $ cinder --os-volume-api-version 3.38 group-failover-replication
        --secondary-backend-id default grp_1
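
After a failover or failback, the group's replication status can be checked
with the generic group commands, for example (assuming the standard
`group-show` command is available in the client):

.. code-block:: console

   $ cinder --os-volume-api-version 3.38 group-show grp_1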


@@ -504,7 +504,7 @@ driver.fungible=missing
driver.hitachi_vsp=missing
driver.hpe_3par=complete
driver.hpe_msa=missing
driver.hpe_nimble=missing
driver.hpe_nimble=complete
driver.hpe_xp=missing
driver.huawei_t_v1=missing
driver.huawei_t_v2=missing


@@ -0,0 +1,5 @@
---
features:
  - |
    HPE Nimble driver: Added group replication support.