NetApp cDOT: Avoid cleaning up 'invalid' mirrors
A cDOT SnapMirror cannot be established from a FlexVol to itself; the cDOT driver nevertheless issues ZAPIs to delete/release SnapMirrors between a volume and itself. Avoid this behavior and add unit test coverage.

Change-Id: I8ca096dfe8463511595a67cbea3ca31a80c0e5fb
Closes-Bug: #1578328
parent 7a16eb685b
commit 8cc148fe91
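To make the fix concrete before the diff: a minimal, runnable sketch of the guarded cleanup loop this change introduces, using a unittest.mock stand-in for the driver's data-motion session (the dict replicas and the mock session are illustrative, not driver objects):

from unittest import mock

# Stand-in for the driver's data-motion session; delete_snapmirror(source,
# dest) is the helper that issues the SnapMirror delete/release ZAPIs.
dm_session = mock.Mock()

replica = {'id': 'r1'}  # the replica being deleted
replica_list = [{'id': 'active'}, {'id': 'r1'}, {'id': 'r2'}]

for other_replica in replica_list:
    # A FlexVol cannot be SnapMirrored to itself, so skip the self-pair
    # rather than issuing delete/release ZAPIs for a mirror that cannot exist.
    if other_replica['id'] != replica['id']:
        dm_session.delete_snapmirror(other_replica, replica)
        dm_session.delete_snapmirror(replica, other_replica)

# Both directions are torn down for each *other* replica: 4 calls here,
# where the unguarded loop made 6 (two of them volume-to-itself).
assert dm_session.delete_snapmirror.call_count == 4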
@@ -1240,8 +1240,9 @@ class NetAppCmodeFileStorageLibrary(object):
         # Ensure that all potential snapmirror relationships and their metadata
         # involving the replica are destroyed.
         for other_replica in replica_list:
-            dm_session.delete_snapmirror(other_replica, replica)
-            dm_session.delete_snapmirror(replica, other_replica)
+            if other_replica['id'] != replica['id']:
+                dm_session.delete_snapmirror(other_replica, replica)
+                dm_session.delete_snapmirror(replica, other_replica)
 
         # 2. Delete share
         vserver_client = data_motion.get_client_for_backend(
@@ -41,9 +41,14 @@ from manila.share.drivers.netapp import utils as na_utils
 from manila.share import share_types
 from manila.share import utils as share_utils
 from manila import test
+from manila.tests import fake_share
 from manila.tests.share.drivers.netapp.dataontap import fakes as fake
 
 
+def fake_replica(**kwargs):
+    return fake_share.fake_replica(for_manager=True, **kwargs)
+
+
 @ddt.ddt
 class NetAppFileStorageLibraryTestCase(test.TestCase):
 
@@ -2283,6 +2288,16 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
             fake.BACKEND_NAME, vserver_name=fake.VSERVER1)
 
     def test_delete_replica(self):
+
+        active_replica = fake_replica(
+            replica_state=constants.REPLICA_STATE_ACTIVE)
+        replica_1 = fake_replica(
+            replica_state=constants.REPLICA_STATE_IN_SYNC,
+            host=fake.MANILA_HOST_NAME)
+        replica_2 = fake_replica(
+            replica_state=constants.REPLICA_STATE_OUT_OF_SYNC)
+        replica_list = [active_replica, replica_1, replica_2]
+
         self.mock_object(self.library,
                          '_deallocate_container',
                          mock.Mock())
@@ -2297,19 +2312,30 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
             mock.Mock(return_value=fake.VSERVER1))
 
         result = self.library.delete_replica(None,
-                                             [fake.SHARE],
-                                             fake.SHARE,
+                                             replica_list,
+                                             replica_1,
                                              [],
                                              share_server=None)
         self.assertIsNone(result)
-        mock_dm_session.delete_snapmirror.assert_called_with(fake.SHARE,
-                                                             fake.SHARE)
-        self.assertEqual(2, mock_dm_session.delete_snapmirror.call_count)
+        mock_dm_session.delete_snapmirror.assert_has_calls([
+            mock.call(active_replica, replica_1),
+            mock.call(replica_2, replica_1),
+            mock.call(replica_1, replica_2),
+            mock.call(replica_1, active_replica)],
+            any_order=True)
+        self.assertEqual(4, mock_dm_session.delete_snapmirror.call_count)
         data_motion.get_client_for_backend.assert_called_with(
             fake.BACKEND_NAME, vserver_name=mock.ANY)
         self.assertEqual(1, data_motion.get_client_for_backend.call_count)
 
     def test_delete_replica_with_share_server(self):
+
+        active_replica = fake_replica(
+            replica_state=constants.REPLICA_STATE_ACTIVE)
+        replica = fake_replica(replica_state=constants.REPLICA_STATE_IN_SYNC,
+                               host=fake.MANILA_HOST_NAME)
+        replica_list = [active_replica, replica]
+
         self.mock_object(self.library,
                          '_deallocate_container',
                          mock.Mock())
@@ -2324,18 +2350,25 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
             mock.Mock(return_value=fake.VSERVER1))
 
         result = self.library.delete_replica(None,
-                                             [fake.SHARE],
-                                             fake.SHARE,
+                                             replica_list,
+                                             replica,
                                              [],
                                              share_server=fake.SHARE_SERVER)
         self.assertIsNone(result)
-        mock_dm_session.delete_snapmirror.assert_called_with(fake.SHARE,
-                                                             fake.SHARE)
-        self.assertEqual(2, mock_dm_session.delete_snapmirror.call_count)
+        mock_dm_session.delete_snapmirror.assert_has_calls([
+            mock.call(active_replica, replica),
+            mock.call(replica, active_replica)],
+            any_order=True)
         data_motion.get_client_for_backend.assert_called_once_with(
             fake.BACKEND_NAME, vserver_name=fake.VSERVER1)
 
     def test_delete_replica_share_absent_on_backend(self):
+        active_replica = fake_replica(
+            replica_state=constants.REPLICA_STATE_ACTIVE)
+        replica = fake_replica(replica_state=constants.REPLICA_STATE_IN_SYNC,
+                               host=fake.MANILA_HOST_NAME)
+        replica_list = [active_replica, replica]
+
         self.mock_object(self.library,
                          '_deallocate_container',
                          mock.Mock())
@@ -2352,16 +2385,17 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
             mock.Mock(return_value=fake.VSERVER1))
 
         result = self.library.delete_replica(None,
-                                             [fake.SHARE],
-                                             fake.SHARE,
+                                             replica_list,
+                                             replica,
                                              [],
                                              share_server=None)
 
         self.assertIsNone(result)
         self.assertFalse(self.library._deallocate_container.called)
-        mock_dm_session.delete_snapmirror.assert_called_with(fake.SHARE,
-                                                             fake.SHARE)
-        self.assertEqual(2, mock_dm_session.delete_snapmirror.call_count)
+        mock_dm_session.delete_snapmirror.assert_has_calls([
+            mock.call(active_replica, replica),
+            mock.call(replica, active_replica)],
+            any_order=True)
         data_motion.get_client_for_backend.assert_called_with(
             fake.BACKEND_NAME, vserver_name=mock.ANY)
         self.assertEqual(1, data_motion.get_client_for_backend.call_count)
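A note on the assertion style in these tests: assert_has_calls(..., any_order=True) only checks that each listed call occurred somewhere in the mock's history, which is why the first test also pins the exact total with call_count. A small self-contained illustration using plain unittest.mock (no driver code):

from unittest import mock

session = mock.Mock()
session.delete_snapmirror('a', 'b')
session.delete_snapmirror('b', 'a')

# Passes: both listed calls happened; order is irrelevant with any_order=True.
session.delete_snapmirror.assert_has_calls(
    [mock.call('b', 'a'), mock.call('a', 'b')], any_order=True)

# assert_has_calls alone would still pass if extra, unexpected calls were
# made, so the tests additionally assert the exact call_count.
assert session.delete_snapmirror.call_count == 2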
@@ -67,6 +67,8 @@ IPSPACE = 'fake_ipspace'
 IPSPACE_ID = '27d38c27-3e8b-4d7d-9d91-fcf295e3ac8f'
 MTU = 1234
 DEFAULT_MTU = 1500
+MANILA_HOST_NAME = '%(host)s@%(backend)s#%(pool)s' % {
+    'host': HOST_NAME, 'backend': BACKEND_NAME, 'pool': POOL_NAME}
 
 CLIENT_KWARGS = {
     'username': 'admin',
@@ -80,8 +82,7 @@ CLIENT_KWARGS = {
 
 SHARE = {
     'id': SHARE_ID,
-    'host': '%(host)s@%(backend)s#%(pool)s' % {
-        'host': HOST_NAME, 'backend': BACKEND_NAME, 'pool': POOL_NAME},
+    'host': MANILA_HOST_NAME,
     'project_id': TENANT_ID,
     'name': SHARE_NAME,
     'size': SHARE_SIZE,
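The new MANILA_HOST_NAME constant captures manila's standard host@backend#pool host string, which the replica tests set on a replica so the driver can resolve its backend. A quick illustration with placeholder values (the real HOST_NAME, BACKEND_NAME, and POOL_NAME constants are defined earlier in the fakes module):

# Placeholder values; the actual constants live elsewhere in the fakes module.
HOST_NAME = 'fake_host'
BACKEND_NAME = 'fake_backend'
POOL_NAME = 'fake_pool'

# Manila identifies a share's location as host@backend#pool.
MANILA_HOST_NAME = '%(host)s@%(backend)s#%(pool)s' % {
    'host': HOST_NAME, 'backend': BACKEND_NAME, 'pool': POOL_NAME}

print(MANILA_HOST_NAME)  # fake_host@fake_backend#fake_pool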
@@ -0,0 +1,5 @@
+---
+fixes:
+  - Changed share replica deletion logic in the NetApp cDOT driver
+    to disregard invalid replication relationships from among those recorded
+    by the driver to clean up.