From 53364e07a3c3da8dc51bb9d8733f42e82e6f82e0 Mon Sep 17 00:00:00 2001 From: yogesh Date: Thu, 24 Mar 2016 17:36:36 -0400 Subject: [PATCH] Handle manage/unmanage for replicated shares Managing a share with a share type that has replication_type extra_spec must be allowed. Drivers are expected to fail this operation if the share was part of a replication relationship that Manila does not know about. Unmanaging a share with replicas must not be permitted until all replicas are removed. Managing and unmanaging of snapshots must not be permitted for a share that has replicas. Modify the NetApp driver for manage_existing to check for existing replicas. Also fix issue with manage retry where the share data was being altered inappropriately by a DB API. Closes-Bug: #1561641 Closes-Bug: #1565903 Co-Authored-By: Goutham Pacha Ravi Change-Id: I82f1fef1e30114e017efd00fa7da70aceecab94c --- manila/api/v1/share_unmanage.py | 4 + manila/api/v2/share_snapshots.py | 8 +- manila/db/sqlalchemy/api.py | 52 +++-- manila/share/api.py | 13 +- manila/share/driver.py | 6 +- .../netapp/dataontap/client/client_cmode.py | 27 +++ .../netapp/dataontap/cluster_mode/lib_base.py | 7 + manila/share/manager.py | 5 + manila/tests/api/v1/test_share_unmanage.py | 22 ++ manila/tests/api/v2/test_share_snapshots.py | 34 +++- manila/tests/fake_share.py | 1 + .../drivers/netapp/dataontap/client/fakes.py | 9 + .../dataontap/client/test_client_cmode.py | 66 ++++++ .../dataontap/cluster_mode/test_lib_base.py | 22 ++ manila/tests/share/test_api.py | 121 ++++++++++- manila/tests/share/test_manager.py | 13 +- .../api/admin/test_replication_actions.py | 189 ++++++++++++++++++ ...age-replicated-share-fa90ce34372b6df5.yaml | 12 ++ 18 files changed, 579 insertions(+), 32 deletions(-) create mode 100644 manila_tempest_tests/tests/api/admin/test_replication_actions.py create mode 100644 releasenotes/notes/manage-unmanage-replicated-share-fa90ce34372b6df5.yaml diff --git a/manila/api/v1/share_unmanage.py 
b/manila/api/v1/share_unmanage.py index 52859b921d..d511deaff1 100644 --- a/manila/api/v1/share_unmanage.py +++ b/manila/api/v1/share_unmanage.py @@ -37,6 +37,10 @@ class ShareUnmanageMixin(object): try: share = self.share_api.get(context, id) + if share.get('has_replicas'): + msg = _("Share %s has replicas. It cannot be unmanaged " + "until all replicas are removed.") % share['id'] + raise exc.HTTPConflict(explanation=msg) if share['instance'].get('share_server_id'): msg = _("Operation 'unmanage' is not supported for shares " "that are created on top of share servers " diff --git a/manila/api/v2/share_snapshots.py b/manila/api/v2/share_snapshots.py index 1b0103f36e..8775b868d8 100644 --- a/manila/api/v2/share_snapshots.py +++ b/manila/api/v2/share_snapshots.py @@ -59,6 +59,11 @@ class ShareSnapshotsController(share_snapshots.ShareSnapshotMixin, "snapshots of shares that are created with share" " servers (created with share-networks).") raise exc.HTTPForbidden(explanation=msg) + elif share.get('has_replicas'): + msg = _("Share %s has replicas. Snapshots of this share " + "cannot currently be unmanaged until all replicas " + "are removed.") % share['id'] + raise exc.HTTPConflict(explanation=msg) elif snapshot['status'] in constants.TRANSITIONAL_STATUSES: msg = _("Snapshot with transitional state cannot be " "unmanaged. 
Snapshot '%(s_id)s' is in '%(state)s' " @@ -118,7 +123,8 @@ class ShareSnapshotsController(share_snapshots.ShareSnapshotMixin, driver_options) except (exception.ShareNotFound, exception.ShareSnapshotNotFound) as e: raise exc.HTTPNotFound(explanation=six.text_type(e)) - except exception.ManageInvalidShareSnapshot as e: + except (exception.InvalidShare, + exception.ManageInvalidShareSnapshot) as e: raise exc.HTTPConflict(explanation=six.text_type(e)) return self._view_builder.detail(req, snapshot_ref) diff --git a/manila/db/sqlalchemy/api.py b/manila/db/sqlalchemy/api.py index 473a0cbddd..5ece1001a0 100644 --- a/manila/db/sqlalchemy/api.py +++ b/manila/db/sqlalchemy/api.py @@ -1109,27 +1109,35 @@ def reservation_expire(context): ################ -def extract_instance_values(values, fields): +def extract_instance_values(values_to_extract, fields): + values = copy.deepcopy(values_to_extract) instance_values = {} for field in fields: field_value = values.pop(field, None) if field_value: instance_values.update({field: field_value}) - return instance_values + return instance_values, values def extract_share_instance_values(values): share_instance_model_fields = [ 'status', 'host', 'scheduled_at', 'launched_at', 'terminated_at', - 'share_server_id', 'share_network_id', 'availability_zone' + 'share_server_id', 'share_network_id', 'availability_zone', + 'replica_state', ] - return extract_instance_values(values, share_instance_model_fields) + share_instance_values, share_values = ( + extract_instance_values(values, share_instance_model_fields) + ) + return share_instance_values, share_values def extract_snapshot_instance_values(values): fields = ['status', 'progress', 'provider_location'] - return extract_instance_values(values, fields) + snapshot_instance_values, snapshot_values = ( + extract_instance_values(values, fields) + ) + return snapshot_instance_values, snapshot_values ################ @@ -1480,10 +1488,10 @@ def share_create(context, share_values, 
create_share_instance=True): models.ShareMetadata) session = get_session() share_ref = models.Share() - share_instance_values = extract_share_instance_values(values) + share_instance_values, share_values = extract_share_instance_values(values) ensure_availability_zone_exists(context, share_instance_values, session, strict=False) - share_ref.update(values) + share_ref.update(share_values) with session.begin(): share_ref.save(session=session) @@ -1514,10 +1522,11 @@ def share_data_get_for_project(context, project_id, user_id, session=None): @require_context @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True) -def share_update(context, share_id, values): +def share_update(context, share_id, update_values): session = get_session() + values = copy.deepcopy(update_values) - share_instance_values = extract_share_instance_values(values) + share_instance_values, share_values = extract_share_instance_values(values) ensure_availability_zone_exists(context, share_instance_values, session, strict=False) @@ -1527,7 +1536,7 @@ def share_update(context, share_id, values): _share_instance_update(context, share_ref.instance['id'], share_instance_values, session=session) - share_ref.update(values) + share_ref.update(share_values) share_ref.save(session=session) return share_ref @@ -2024,17 +2033,21 @@ def _set_share_snapshot_instance_data(context, snapshot_instances, session): @require_context -def share_snapshot_create(context, values, create_snapshot_instance=True): +def share_snapshot_create(context, create_values, + create_snapshot_instance=True): + values = copy.deepcopy(create_values) values = ensure_model_dict_has_id(values) snapshot_ref = models.ShareSnapshot() - snapshot_instance_values = extract_snapshot_instance_values(values) - share_ref = share_get(context, values.get('share_id')) + snapshot_instance_values, snapshot_values = ( + extract_snapshot_instance_values(values) + ) + share_ref = share_get(context, snapshot_values.get('share_id')) 
snapshot_instance_values.update( {'share_instance_id': share_ref.instance.id} ) - snapshot_ref.update(values) + snapshot_ref.update(snapshot_values) session = get_session() with session.begin(): snapshot_ref.save(session=session) @@ -2046,7 +2059,8 @@ def share_snapshot_create(context, values, create_snapshot_instance=True): snapshot_instance_values, session=session ) - return share_snapshot_get(context, values['id'], session=session) + return share_snapshot_get( + context, snapshot_values['id'], session=session) @require_admin_context @@ -2198,10 +2212,12 @@ def share_snapshot_update(context, snapshot_id, values): snapshot_ref = share_snapshot_get(context, snapshot_id, session=session) - instance_values = extract_snapshot_instance_values(values) + instance_values, snapshot_values = ( + extract_snapshot_instance_values(values) + ) - if values: - snapshot_ref.update(values) + if snapshot_values: + snapshot_ref.update(snapshot_values) snapshot_ref.save(session=session) if instance_values: diff --git a/manila/share/api.py b/manila/share/api.py index df1cf63021..25ba2f07b8 100644 --- a/manila/share/api.py +++ b/manila/share/api.py @@ -505,15 +505,15 @@ class API(base.Base): 'share_type_id': share_data['share_type_id'] }) - share_type = {} share_type_id = share_data['share_type_id'] - if share_type_id: - share_type = share_types.get_share_type(context, share_type_id) + share_type = share_types.get_share_type(context, share_type_id) snapshot_support = strutils.bool_from_string( share_type.get('extra_specs', {}).get( 'snapshot_support', True) if share_type else True, strict=True) + replication_type = share_type.get('extra_specs', {}).get( + 'replication_type') share_data.update({ 'user_id': context.user_id, @@ -521,6 +521,7 @@ class API(base.Base): 'status': constants.STATUS_MANAGING, 'scheduled_at': timeutils.utcnow(), 'snapshot_support': snapshot_support, + 'replication_type': replication_type, }) LOG.debug("Manage: Found shares %s.", len(shares)) @@ -621,6 +622,12 @@ 
class API(base.Base): except exception.NotFound: raise exception.ShareNotFound(share_id=snapshot_data['share_id']) + if share['has_replicas']: + msg = (_("Share %s has replicas. Snapshots of this share cannot " + "currently be managed until all replicas are removed.") + % share['id']) + raise exception.InvalidShare(reason=msg) + existing_snapshots = self.db.share_snapshot_get_all_for_share( context, snapshot_data['share_id']) diff --git a/manila/share/driver.py b/manila/share/driver.py index 2b9759bfdb..53e2e39061 100644 --- a/manila/share/driver.py +++ b/manila/share/driver.py @@ -637,9 +637,13 @@ class ShareDriver(object): def manage_existing(self, share, driver_options): """Brings an existing share under Manila management. - If provided share is not valid, then raise a + If the provided share is not valid, then raise a ManageInvalidShare exception, specifying a reason for the failure. + If the provided share is not in a state that can be managed, such as + being replicated on the backend, the driver *MUST* raise + ManageInvalidShare exception with an appropriate message. + The share has a share_type, and the driver can inspect that and compare against the properties of the referenced backend share. 
If they are incompatible, raise a diff --git a/manila/share/drivers/netapp/dataontap/client/client_cmode.py b/manila/share/drivers/netapp/dataontap/client/client_cmode.py index 9c90d36e94..0d49b889ff 100644 --- a/manila/share/drivers/netapp/dataontap/client/client_cmode.py +++ b/manila/share/drivers/netapp/dataontap/client/client_cmode.py @@ -1579,6 +1579,7 @@ class NetAppCmodeClient(client_base.NetAppBaseClient): 'name': None, 'type': None, 'style': None, + 'owning-vserver-name': None, }, 'volume-space-attributes': { 'size': None, @@ -1607,6 +1608,8 @@ class NetAppCmodeClient(client_base.NetAppBaseClient): 'name': volume_id_attributes.get_child_content('name'), 'type': volume_id_attributes.get_child_content('type'), 'style': volume_id_attributes.get_child_content('style'), + 'owning-vserver-name': volume_id_attributes.get_child_content( + 'owning-vserver-name'), 'size': volume_space_attributes.get_child_content('size'), } return volume @@ -2914,3 +2917,27 @@ class NetAppCmodeClient(client_base.NetAppBaseClient): snapmirrors.append(snapmirror) return snapmirrors + + def volume_has_snapmirror_relationships(self, volume): + """Return True if snapmirror relationships exist for a given volume. + + If we have snapmirror control plane license, we can verify whether + the given volume is part of any snapmirror relationships. 
+ """ + try: + # Check if volume is a source snapmirror volume + snapmirrors = self.get_snapmirrors( + volume['owning-vserver-name'], volume['name'], None, None) + # Check if volume is a destination snapmirror volume + if not snapmirrors: + snapmirrors = self.get_snapmirrors( + None, None, volume['owning-vserver-name'], volume['name']) + + has_snapmirrors = len(snapmirrors) > 0 + except netapp_api.NaApiError: + msg = _LE("Could not determine if volume %s is part of " + "existing snapmirror relationships.") + LOG.exception(msg, volume['name']) + has_snapmirrors = False + + return has_snapmirrors diff --git a/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py b/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py index f5287878a0..4a5b2169b8 100644 --- a/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py +++ b/manila/share/drivers/netapp/dataontap/cluster_mode/lib_base.py @@ -800,6 +800,7 @@ class NetAppCmodeFileStorageLibrary(object): # Get existing volume info volume = vserver_client.get_volume_to_manage(aggregate_name, volume_name) + if not volume: msg = _('Volume %(volume)s not found on aggregate %(aggr)s.') msg_args = {'volume': volume_name, 'aggr': aggregate_name} @@ -866,6 +867,12 @@ class NetAppCmodeFileStorageLibrary(object): msg_args = {'volume': volume['name']} raise exception.ManageInvalidShare(reason=msg % msg_args) + if vserver_client.volume_has_snapmirror_relationships(volume): + msg = _('Volume %(volume)s must not be in any snapmirror ' + 'relationships.') + msg_args = {'volume': volume['name']} + raise exception.ManageInvalidShare(reason=msg % msg_args) + @na_utils.trace def create_consistency_group(self, context, cg_dict, share_server=None): """Creates a consistency group. 
diff --git a/manila/share/manager.py b/manila/share/manager.py index 7048add3b5..8e2a8e0416 100644 --- a/manila/share/manager.py +++ b/manila/share/manager.py @@ -1566,6 +1566,11 @@ class ShareManager(manager.SchedulerDependentManager): 'availability_zone': CONF.storage_availability_zone, }) + # If the share was managed with `replication_type` extra-spec, the + # instance becomes an `active` replica. + if share_ref.get('replication_type'): + share_update['replica_state'] = constants.REPLICA_STATE_ACTIVE + # NOTE(vponomaryov): we should keep only those export locations # that driver has calculated to avoid incompatibilities with one # provided by user. diff --git a/manila/tests/api/v1/test_share_unmanage.py b/manila/tests/api/v1/test_share_unmanage.py index 0a0027718e..6f9490f5e2 100644 --- a/manila/tests/api/v1/test_share_unmanage.py +++ b/manila/tests/api/v1/test_share_unmanage.py @@ -98,6 +98,28 @@ class ShareUnmanageTest(test.TestCase): self.mock_policy_check.assert_called_once_with( self.context, self.resource_name, 'unmanage') + def test_unmanage_share_that_has_replicas(self): + share = dict(status=constants.STATUS_AVAILABLE, id='foo_id', + instance={}, has_replicas=True) + mock_api_unmanage = self.mock_object(self.controller.share_api, + 'unmanage') + mock_db_snapshots_get = self.mock_object( + self.controller.share_api.db, 'share_snapshot_get_all_for_share') + self.mock_object( + self.controller.share_api, 'get', + mock.Mock(return_value=share)) + + self.assertRaises( + webob.exc.HTTPConflict, + self.controller.unmanage, self.request, share['id']) + + self.assertFalse(mock_api_unmanage.called) + self.assertFalse(mock_db_snapshots_get.called) + self.controller.share_api.get.assert_called_once_with( + self.request.environ['manila.context'], share['id']) + self.mock_policy_check.assert_called_once_with( + self.context, self.resource_name, 'unmanage') + def test_unmanage_share_based_on_share_server(self): share = dict(instance=dict(share_server_id='foo_id'), 
id='bar_id') self.mock_object( diff --git a/manila/tests/api/v2/test_share_snapshots.py b/manila/tests/api/v2/test_share_snapshots.py index c72fb8accf..fe2a645347 100644 --- a/manila/tests/api/v2/test_share_snapshots.py +++ b/manila/tests/api/v2/test_share_snapshots.py @@ -486,7 +486,8 @@ class ShareSnapshotAdminActionsAPITest(test.TestCase): @ddt.data(exception.ShareNotFound(share_id='fake'), exception.ShareSnapshotNotFound(snapshot_id='fake'), - exception.ManageInvalidShareSnapshot(reason='error')) + exception.ManageInvalidShareSnapshot(reason='error'), + exception.InvalidShare(reason='error')) def test_manage_exception(self, exception_type): self.mock_policy_check = self.mock_object( policy, 'check_policy', mock.Mock(return_value=True)) @@ -497,10 +498,12 @@ class ShareSnapshotAdminActionsAPITest(test.TestCase): share_api.API, 'manage_snapshot', mock.Mock( side_effect=exception_type)) - if isinstance(exception_type, exception.ManageInvalidShareSnapshot): + http_ex = webob.exc.HTTPNotFound + + if (isinstance(exception_type, exception.ManageInvalidShareSnapshot) + or isinstance(exception_type, exception.InvalidShare)): http_ex = webob.exc.HTTPConflict - else: - http_ex = webob.exc.HTTPNotFound + self.assertRaises(http_ex, self.controller.manage, self.manage_request, body) @@ -544,6 +547,29 @@ class ShareSnapshotAdminActionsAPITest(test.TestCase): self.unmanage_request.environ['manila.context'], self.resource_name, 'unmanage_snapshot') + def test_snapshot_unmanage_replicated_snapshot(self): + self.mock_policy_check = self.mock_object( + policy, 'check_policy', mock.Mock(return_value=True)) + share = {'status': constants.STATUS_AVAILABLE, 'id': 'bar_id', + 'has_replicas': True} + self.mock_object(share_api.API, 'get', mock.Mock(return_value=share)) + snapshot = {'status': constants.STATUS_AVAILABLE, 'id': 'foo_id', + 'share_id': 'bar_id'} + self.mock_object(share_api.API, 'get_snapshot', + mock.Mock(return_value=snapshot)) + + 
self.assertRaises(webob.exc.HTTPConflict, + self.controller.unmanage, + self.unmanage_request, + snapshot['id']) + self.controller.share_api.get_snapshot.assert_called_once_with( + self.unmanage_request.environ['manila.context'], snapshot['id']) + self.controller.share_api.get.assert_called_once_with( + self.unmanage_request.environ['manila.context'], share['id']) + self.mock_policy_check.assert_called_once_with( + self.unmanage_request.environ['manila.context'], + self.resource_name, 'unmanage_snapshot') + @ddt.data(*constants.TRANSITIONAL_STATUSES) def test_snapshot_unmanage_with_transitional_state(self, status): self.mock_policy_check = self.mock_object( diff --git a/manila/tests/fake_share.py b/manila/tests/fake_share.py index 9e2b1a7269..6cc95e5a4f 100644 --- a/manila/tests/fake_share.py +++ b/manila/tests/fake_share.py @@ -124,6 +124,7 @@ def fake_snapshot(create_instance=False, **kwargs): snapshot['instance']['provider_location'] ) snapshot['progress'] = snapshot['instance']['progress'] + snapshot['instances'] = snapshot['instance'], else: snapshot['status'] = constants.STATUS_AVAILABLE snapshot['progress'] = '0%' diff --git a/manila/tests/share/drivers/netapp/dataontap/client/fakes.py b/manila/tests/share/drivers/netapp/dataontap/client/fakes.py index 66317b0e62..a97846ac89 100644 --- a/manila/tests/share/drivers/netapp/dataontap/client/fakes.py +++ b/manila/tests/share/drivers/netapp/dataontap/client/fakes.py @@ -1955,3 +1955,12 @@ FAKE_RESULT_SUCCESS = api.NaElement('result') FAKE_RESULT_SUCCESS.add_attr('status', 'passed') FAKE_HTTP_OPENER = urllib.request.build_opener() + +FAKE_MANAGE_VOLUME = { + 'aggregate': SHARE_AGGREGATE_NAME, + 'name': SHARE_NAME, + 'owning-vserver-name': VSERVER_NAME, + 'junction_path': VOLUME_JUNCTION_PATH, + 'style': 'fake_style', + 'size': SHARE_SIZE, +} diff --git a/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py b/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py index 
6a4b906228..4d4ce01579 100644 --- a/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py +++ b/manila/tests/share/drivers/netapp/dataontap/client/test_client_cmode.py @@ -2840,6 +2840,7 @@ class NetAppClientCmodeTestCase(test.TestCase): 'name': None, 'type': None, 'style': None, + 'owning-vserver-name': None, }, 'volume-space-attributes': { 'size': None, @@ -2854,6 +2855,7 @@ class NetAppClientCmodeTestCase(test.TestCase): 'type': 'rw', 'style': 'flex', 'size': fake.SHARE_SIZE, + 'owning-vserver-name': fake.VSERVER_NAME } self.client.send_iter_request.assert_has_calls([ mock.call('volume-get-iter', volume_get_iter_args)]) @@ -5014,3 +5016,67 @@ class NetAppClientCmodeTestCase(test.TestCase): } self.client.send_request.assert_has_calls([ mock.call('snapmirror-resync', snapmirror_resync_args)]) + + @ddt.data('source', 'destination', None) + def test_volume_has_snapmirror_relationships(self, snapmirror_rel_type): + """Snapmirror relationships can be both ways.""" + + vol = fake.FAKE_MANAGE_VOLUME + snapmirror = { + 'source-vserver': fake.SM_SOURCE_VSERVER, + 'source-volume': fake.SM_SOURCE_VOLUME, + 'destination-vserver': fake.SM_DEST_VSERVER, + 'destination-volume': fake.SM_DEST_VOLUME, + 'is-healthy': 'true', + 'mirror-state': 'snapmirrored', + 'schedule': 'daily', + } + expected_get_snapmirrors_call_count = 2 + expected_get_snapmirrors_calls = [ + mock.call(vol['owning-vserver-name'], vol['name'], None, None), + mock.call(None, None, vol['owning-vserver-name'], vol['name']), + ] + if snapmirror_rel_type is None: + side_effect = ([], []) + elif snapmirror_rel_type == 'source': + snapmirror['source-vserver'] = vol['owning-vserver-name'] + snapmirror['source-volume'] = vol['name'] + side_effect = ([snapmirror], None) + expected_get_snapmirrors_call_count = 1 + expected_get_snapmirrors_calls.pop() + else: + snapmirror['destination-vserver'] = vol['owning-vserver-name'] + snapmirror['destination-volume'] = vol['name'] + side_effect = (None, 
[snapmirror]) + mock_get_snapmirrors_call = self.mock_object( + self.client, 'get_snapmirrors', mock.Mock(side_effect=side_effect)) + mock_exc_log = self.mock_object(client_cmode.LOG, 'exception') + expected_retval = True if snapmirror_rel_type else False + + retval = self.client.volume_has_snapmirror_relationships(vol) + + self.assertEqual(expected_retval, retval) + self.assertEqual(expected_get_snapmirrors_call_count, + mock_get_snapmirrors_call.call_count) + mock_get_snapmirrors_call.assert_has_calls( + expected_get_snapmirrors_calls) + self.assertFalse(mock_exc_log.called) + + def test_volume_has_snapmirror_relationships_api_error(self): + + vol = fake.FAKE_MANAGE_VOLUME + expected_get_snapmirrors_calls = [ + mock.call(vol['owning-vserver-name'], vol['name'], None, None), + ] + mock_get_snapmirrors_call = self.mock_object( + self.client, 'get_snapmirrors', mock.Mock( + side_effect=self._mock_api_error(netapp_api.EINTERNALERROR))) + mock_exc_log = self.mock_object(client_cmode.LOG, 'exception') + + retval = self.client.volume_has_snapmirror_relationships(vol) + + self.assertFalse(retval) + self.assertEqual(1, mock_get_snapmirrors_call.call_count) + mock_get_snapmirrors_call.assert_has_calls( + expected_get_snapmirrors_calls) + self.assertTrue(mock_exc_log.called) diff --git a/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_base.py b/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_base.py index 1c0e8a093f..88e44cc712 100644 --- a/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_base.py +++ b/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_base.py @@ -1451,6 +1451,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase): vserver_client.volume_has_luns = mock.Mock(return_value=False) vserver_client.volume_has_junctioned_volumes = mock.Mock( return_value=False) + vserver_client.volume_has_snapmirror_relationships = mock.Mock( + return_value=False) result = 
self.library._validate_volume_for_manage( fake.FLEXVOL_TO_MANAGE, vserver_client) @@ -1474,6 +1476,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase): vserver_client.volume_has_luns = mock.Mock(return_value=False) vserver_client.volume_has_junctioned_volumes = mock.Mock( return_value=False) + vserver_client.volume_has_snapmirror_relationships = mock.Mock( + return_value=False) self.assertRaises(exception.ManageInvalidShare, self.library._validate_volume_for_manage, @@ -1486,6 +1490,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase): vserver_client.volume_has_luns = mock.Mock(return_value=True) vserver_client.volume_has_junctioned_volumes = mock.Mock( return_value=False) + vserver_client.volume_has_snapmirror_relationships = mock.Mock( + return_value=False) self.assertRaises(exception.ManageInvalidShare, self.library._validate_volume_for_manage, @@ -1498,6 +1504,22 @@ class NetAppFileStorageLibraryTestCase(test.TestCase): vserver_client.volume_has_luns = mock.Mock(return_value=False) vserver_client.volume_has_junctioned_volumes = mock.Mock( return_value=True) + vserver_client.volume_has_snapmirror_relationships = mock.Mock( + return_value=False) + + self.assertRaises(exception.ManageInvalidShare, + self.library._validate_volume_for_manage, + fake.FLEXVOL_TO_MANAGE, + vserver_client) + + def test_validate_volume_for_manage_snapmirror_relationships_present(self): + + vserver_client = mock.Mock() + vserver_client.volume_has_luns = mock.Mock(return_value=False) + vserver_client.volume_has_junctioned_volumes = mock.Mock( + return_value=False) + vserver_client.volume_has_snapmirror_relationships = mock.Mock( + return_value=True) self.assertRaises(exception.ManageInvalidShare, self.library._validate_volume_for_manage, diff --git a/manila/tests/share/test_api.py b/manila/tests/share/test_api.py index 8eb8186271..8da0d74aef 100644 --- a/manila/tests/share/test_api.py +++ b/manila/tests/share/test_api.py @@ -728,7 +728,8 @@ class 
ShareAPITestCase(test.TestCase): self.context, request_spec=mock.ANY, filter_properties={}) self.assertFalse(self.api.share_rpcapi.create_share_instance.called) - def test_manage_new(self): + @ddt.data('dr', 'readable', None) + def test_manage_new(self, replication_type): share_data = { 'host': 'fake', 'export_location': 'fake', @@ -746,6 +747,7 @@ class ShareAPITestCase(test.TestCase): 'id': 'fake_type_id', 'extra_specs': { 'snapshot_support': False, + 'replication_type': replication_type, }, } @@ -770,6 +772,7 @@ class ShareAPITestCase(test.TestCase): 'status': constants.STATUS_MANAGING, 'scheduled_at': date, 'snapshot_support': fake_type['extra_specs']['snapshot_support'], + 'replication_type': replication_type, }) expected_request_spec = self._get_request_spec_dict( @@ -800,6 +803,7 @@ class ShareAPITestCase(test.TestCase): 'id': 'fake_type_id', 'extra_specs': { 'snapshot_support': False, + 'replication_type': 'writable', }, } @@ -969,6 +973,121 @@ class ShareAPITestCase(test.TestCase): db_api.share_snapshot_create.assert_called_once_with( self.context, options) + def test_manage_snapshot_share_not_found(self): + snapshot = fakes.fake_snapshot(share_id='fake_share', + as_primitive=True) + mock_share_get_call = self.mock_object( + db_api, 'share_get', mock.Mock(side_effect=exception.NotFound)) + mock_db_snapshot_call = self.mock_object( + db_api, 'share_snapshot_get_all_for_share') + + self.assertRaises(exception.ShareNotFound, + self.api.manage_snapshot, + self.context, + snapshot, + {}) + self.assertFalse(mock_db_snapshot_call.called) + mock_share_get_call.assert_called_once_with( + self.context, snapshot['share_id']) + + def test_manage_snapshot_share_has_replicas(self): + share_ref = fakes.fake_share( + has_replicas=True, status=constants.STATUS_AVAILABLE) + self.mock_object( + db_api, 'share_get', mock.Mock(return_value=share_ref)) + snapshot = fakes.fake_snapshot(create_instance=True, as_primitive=True) + mock_db_snapshot_get_all_for_share_call = 
self.mock_object( + db_api, 'share_snapshot_get_all_for_share') + + self.assertRaises(exception.InvalidShare, + self.api.manage_snapshot, + context, + snapshot, + {}) + self.assertFalse(mock_db_snapshot_get_all_for_share_call.called) + + def test_manage_snapshot_already_managed(self): + share_ref = fakes.fake_share( + has_replicas=False, status=constants.STATUS_AVAILABLE) + snapshot = fakes.fake_snapshot(create_instance=True, as_primitive=True) + self.mock_object( + db_api, 'share_get', mock.Mock(return_value=share_ref)) + mock_db_snapshot_call = self.mock_object( + db_api, 'share_snapshot_get_all_for_share', mock.Mock( + return_value=[snapshot])) + mock_db_snapshot_create_call = self.mock_object( + db_api, 'share_snapshot_create') + + self.assertRaises(exception.ManageInvalidShareSnapshot, + self.api.manage_snapshot, + self.context, + snapshot, + {}) + mock_db_snapshot_call.assert_called_once_with( + self.context, snapshot['share_id']) + self.assertFalse(mock_db_snapshot_create_call.called) + + def test_manage_snapshot(self): + share_ref = fakes.fake_share( + has_replicas=False, status=constants.STATUS_AVAILABLE, + host='fake_host') + existing_snapshot = fakes.fake_snapshot( + create_instance=True, share_id=share_ref['id']) + self.mock_object(db_api, 'share_snapshot_get_all_for_share', + mock.Mock(return_value=[existing_snapshot])) + snapshot_data = { + 'share_id': share_ref['id'], + 'provider_location': 'someproviderlocation', + } + expected_snapshot_data = { + 'user_id': self.context.user_id, + 'project_id': self.context.project_id, + 'status': constants.STATUS_MANAGING, + 'share_size': share_ref['size'], + 'progress': '0%', + 'share_proto': share_ref['share_proto'], + } + expected_snapshot_data.update(**snapshot_data) + snapshot = fakes.fake_snapshot( + create_instance=True, **expected_snapshot_data) + self.mock_object( + db_api, 'share_get', mock.Mock(return_value=share_ref)) + mock_db_snapshot_create_call = self.mock_object( + db_api, 'share_snapshot_create', 
mock.Mock(return_value=snapshot)) + mock_rpc_call = self.mock_object(self.share_rpcapi, 'manage_snapshot', + mock.Mock(return_value=snapshot)) + + new_snap = self.api.manage_snapshot( + self.context, snapshot_data, {}) + + self.assertEqual(new_snap, snapshot) + mock_db_snapshot_create_call.assert_called_once_with( + self.context, expected_snapshot_data) + mock_rpc_call.assert_called_once_with( + self.context, snapshot, share_ref['host'], {}) + + def test_unmanage_snapshot(self): + fake_host = 'fake_host' + snapshot_data = { + 'status': constants.STATUS_UNMANAGING, + 'terminated_at': timeutils.utcnow(), + } + snapshot = fakes.fake_snapshot( + create_instance=True, share_instance_id='id2', **snapshot_data) + mock_db_snap_update_call = self.mock_object( + db_api, 'share_snapshot_update', mock.Mock(return_value=snapshot)) + mock_rpc_call = self.mock_object( + self.share_rpcapi, 'unmanage_snapshot') + + retval = self.api.unmanage_snapshot( + self.context, snapshot, fake_host) + + self.assertIsNone(retval) + mock_db_snap_update_call.assert_called_once_with( + self.context, snapshot['id'], snapshot_data) + mock_rpc_call.assert_called_once_with( + self.context, snapshot, fake_host) + def test_create_snapshot_for_replicated_share(self): share = fakes.fake_share( has_replicas=True, status=constants.STATUS_AVAILABLE) diff --git a/manila/tests/share/test_manager.py b/manila/tests/share/test_manager.py index 77b8b29c32..a2bd3b9f04 100644 --- a/manila/tests/share/test_manager.py +++ b/manila/tests/share/test_manager.py @@ -2136,10 +2136,13 @@ class ShareManagerTestCase(test.TestCase): share['project_id'], {'shares': 1, 'gigabytes': 3}) @ddt.data( - {'size': 1}, - {'size': 2, 'name': 'fake'}, - {'size': 3, 'export_locations': ['foo', 'bar', 'quuz']}) + {'size': 1, 'replication_type': None}, + {'size': 2, 'name': 'fake', 'replication_type': 'dr'}, + {'size': 3, 'export_locations': ['foo', 'bar', 'quuz'], + 'replication_type': 'writable'}, + ) def 
test_manage_share_valid_share(self, driver_data): + replication_type = driver_data.pop('replication_type') export_locations = driver_data.get('export_locations') self.mock_object(self.share_manager.db, 'share_update', mock.Mock()) self.mock_object(self.share_manager, 'driver', mock.Mock()) @@ -2157,7 +2160,7 @@ class ShareManagerTestCase(test.TestCase): self.mock_object(self.share_manager.driver, "manage_existing", mock.Mock(return_value=driver_data)) - share = db_utils.create_share() + share = db_utils.create_share(replication_type=replication_type) share_id = share['id'] driver_options = {'fake': 'fake'} @@ -2175,6 +2178,8 @@ class ShareManagerTestCase(test.TestCase): self.share_manager.db.share_export_locations_update.called) valid_share_data = { 'status': constants.STATUS_AVAILABLE, 'launched_at': mock.ANY} + if replication_type: + valid_share_data['replica_state'] = constants.REPLICA_STATE_ACTIVE valid_share_data.update(driver_data) self.share_manager.db.share_update.assert_called_once_with( utils.IsAMatcher(context.RequestContext), diff --git a/manila_tempest_tests/tests/api/admin/test_replication_actions.py b/manila_tempest_tests/tests/api/admin/test_replication_actions.py new file mode 100644 index 0000000000..6ca2f0c08e --- /dev/null +++ b/manila_tempest_tests/tests/api/admin/test_replication_actions.py @@ -0,0 +1,189 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from tempest import config +from tempest.lib.common.utils import data_utils +from tempest.lib import exceptions as lib_exc +from tempest import test +import testtools + +from manila_tempest_tests import clients_share as clients +from manila_tempest_tests.common import constants +from manila_tempest_tests import share_exceptions +from manila_tempest_tests.tests.api import base + +CONF = config.CONF +_MIN_SUPPORTED_MICROVERSION = '2.11' + + +@testtools.skipUnless(CONF.share.run_replication_tests, + 'Replication tests are disabled.') +@testtools.skipIf( + CONF.share.multitenancy_enabled, + "Only for driver_handles_share_servers = False driver mode.") +@base.skip_if_microversion_lt(_MIN_SUPPORTED_MICROVERSION) +class ReplicationAdminTest(base.BaseSharesAdminTest): + + @classmethod + def resource_setup(cls): + super(ReplicationAdminTest, cls).resource_setup() + # Create share_type + name = data_utils.rand_name(constants.TEMPEST_MANILA_PREFIX) + cls.admin_client = clients.AdminManager().shares_v2_client + cls.replication_type = CONF.share.backend_replication_type + + if cls.replication_type not in constants.REPLICATION_TYPE_CHOICES: + raise share_exceptions.ShareReplicationTypeException( + replication_type=cls.replication_type + ) + cls.zones = cls.get_availability_zones(client=cls.admin_client) + cls.share_zone = cls.zones[0] + cls.replica_zone = cls.zones[-1] + + cls.extra_specs = cls.add_required_extra_specs_to_dict( + {"replication_type": cls.replication_type}) + share_type = cls.create_share_type( + name, + cleanup_in_class=True, + extra_specs=cls.extra_specs, + client=cls.admin_client) + cls.share_type = share_type["share_type"] + # Create share with above share_type + cls.share = cls.create_share(size=2, + share_type_id=cls.share_type["id"], + availability_zone=cls.share_zone,) + cls.replica = cls.shares_v2_client.list_share_replicas( + share_id=cls.share['id'])[0] + + @test.attr(type=["gate"]) + @testtools.skipUnless(CONF.share.run_extend_tests, + 'Extend 
share tests are disabled.') + def test_extend_replicated_share(self): + # Test extend share + new_size = self.share["size"] + 1 + self.shares_v2_client.extend_share(self.share["id"], new_size) + self.shares_v2_client.wait_for_share_status(self.share["id"], + "available") + share = self.shares_v2_client.get_share(self.share["id"]) + self.assertEqual(new_size, int(share["size"])) + + @test.attr(type=["gate"]) + @testtools.skipUnless(CONF.share.run_shrink_tests, + 'Shrink share tests are disabled.') + def test_shrink_replicated_share(self): + share = self.shares_v2_client.get_share(self.share["id"]) + new_size = self.share["size"] - 1 + self.shares_v2_client.shrink_share(self.share["id"], new_size) + self.shares_v2_client.wait_for_share_status(share["id"], "available") + shrink_share = self.shares_v2_client.get_share(self.share["id"]) + self.assertEqual(new_size, int(shrink_share["size"])) + + @test.attr(type=["gate", "positive"]) + @testtools.skipUnless(CONF.share.run_manage_unmanage_tests, + 'Manage/Unmanage Tests are disabled.') + def test_manage_share_for_replication_type(self): + """Manage a share with replication share type.""" + # Create a share and unmanage it + share = self.create_share(size=2, + share_type_id=self.share_type["id"], + availability_zone=self.share_zone, + cleanup_in_class=True) + share = self.shares_v2_client.get_share(share["id"]) + export_locations = self.shares_v2_client.list_share_export_locations( + share["id"]) + export_path = export_locations[0]['path'] + + self.shares_v2_client.unmanage_share(share['id']) + self.shares_v2_client.wait_for_resource_deletion(share_id=share['id']) + + # Manage the previously unmanaged share + managed_share = self.shares_v2_client.manage_share( + share['host'], share['share_proto'], + export_path, self.share_type['id']) + self.shares_v2_client.wait_for_share_status( + managed_share['id'], 'available') + + # Add managed share to cleanup queue + self.method_resources.insert( + 0, {'type': 'share', 'id': 
managed_share['id'], + 'client': self.shares_v2_client}) + + # Make sure a replica can be added to newly managed share + self.create_share_replica(managed_share['id'], self.replica_zone, + cleanup=True) + + @test.attr(type=["gate", "negative"]) + @testtools.skipUnless(CONF.share.run_manage_unmanage_tests, + 'Manage/Unmanage Tests are disabled.') + def test_unmanage_replicated_share_with_replica(self): + """Try to unmanage a share having replica.""" + # Create a share replica before unmanaging the share + self.create_share_replica(self.share["id"], self.replica_zone, + cleanup=True) + self.assertRaises( + lib_exc.Conflict, + self.shares_v2_client.unmanage_share, + share_id=self.share['id']) + + @test.attr(type=["gate", "positive"]) + @testtools.skipUnless(CONF.share.run_manage_unmanage_tests, + 'Manage/Unmanage Tests are disabled.') + def test_unmanage_replicated_share_with_no_replica(self): + """Unmanage a replication type share that does not have replica.""" + share = self.create_share(size=2, + share_type_id=self.share_type["id"], + availability_zone=self.share_zone,) + self.shares_v2_client.unmanage_share(share['id']) + self.shares_v2_client.wait_for_resource_deletion(share_id=share['id']) + + @test.attr(type=["gate", "negative"]) + @testtools.skipUnless(CONF.share.run_manage_unmanage_snapshot_tests, + 'Manage/Unmanage Snapshot Tests are disabled.') + def test_manage_replicated_share_snapshot(self): + """Try to manage a snapshot of the replicated share.""" + # Create a share replica before managing the snapshot + self.create_share_replica(self.share["id"], self.replica_zone, + cleanup=True) + self.assertRaises( + lib_exc.Conflict, + self.shares_v2_client.manage_snapshot, + share_id=self.share['id'], + provider_location="127.0.0.1:/fake_provider_location/" + "manila_share_9dc61f49_fbc8_48d7_9337_2f9593d9") + + @test.attr(type=["gate", "negative"]) + @testtools.skipUnless(CONF.share.run_manage_unmanage_snapshot_tests, + 'Manage/Unmanage Snapshot Tests are disabled.') + 
def test_unmanage_replicated_share_snapshot(self): + """Try to unmanage a snapshot of the replicated share with replica.""" + # Create a share replica before unmanaging the snapshot + self.create_share_replica(self.share["id"], self.replica_zone, + cleanup=True) + snapshot = self.create_snapshot_wait_for_active(self.share["id"]) + self.assertRaises( + lib_exc.Conflict, + self.shares_v2_client.unmanage_snapshot, + snapshot_id=snapshot['id']) + + @test.attr(type=["gate", "positive"]) + @testtools.skipUnless(CONF.share.run_manage_unmanage_snapshot_tests, + 'Manage/Unmanage Snapshot Tests are disabled.') + def test_unmanage_replicated_share_snapshot_with_no_replica(self): + """Unmanage a snapshot of the replicated share with no replica.""" + share = self.create_share(size=2, + share_type_id=self.share_type["id"], + availability_zone=self.share_zone,) + + snapshot = self.create_snapshot_wait_for_active(share["id"]) + self.shares_v2_client.unmanage_snapshot(snapshot_id=snapshot['id']) + self.shares_v2_client.wait_for_resource_deletion( + snapshot_id=snapshot['id']) diff --git a/releasenotes/notes/manage-unmanage-replicated-share-fa90ce34372b6df5.yaml b/releasenotes/notes/manage-unmanage-replicated-share-fa90ce34372b6df5.yaml new file mode 100644 index 0000000000..4d6ba399f7 --- /dev/null +++ b/releasenotes/notes/manage-unmanage-replicated-share-fa90ce34372b6df5.yaml @@ -0,0 +1,12 @@ +--- +features: + - Share can be managed with replication_type extra-spec + in the share_type +issues: + - Managing a share with replication_type can only be + possible if the share does not already have replicas. +fixes: + - Retrying to manage shares in ``manage_error`` status + works as expected. + - Snapshot manage and unmanage operations are disabled + for shares with replicas.