Tiramisu: Add groups param to failover_host
failover_host is the interface for Cheesecake. Currently it passes volumes to the driver's failover_host interface. If a backend supports both Cheesecake and Tiramisu, it makes sense for the driver to fail over a group instead of individual volumes when a volume belongs to a replication group. This patch therefore passes groups to the driver's failover_host interface in addition to volumes, so the driver can decide whether to fail over a replication group.

Change-Id: I9842eec1a50ffe65a9490e2ac0c00b468f18b30a
Partially-Implements: blueprint replication-cg
parent 544d13ef0a
commit 32e67f3119
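For orientation, here is a minimal sketch of the driver-facing change, not code from this patch: failover_host now receives the replication groups along with the volumes and returns a third element carrying group updates. The FakeDriver class name and the shape of the group-update dictionaries are illustrative placeholders; only the signature and the three-element return value follow the hunks in this commit.

class FakeDriver(object):
    # Old driver signature: failover_host(self, context, volumes, secondary_id)
    # New driver signature adds an optional ``groups`` list so a backend that
    # supports both Cheesecake and Tiramisu can fail over a replication group
    # as a unit instead of its member volumes one by one.
    def failover_host(self, context, volumes, secondary_id=None, groups=None):
        volume_update_list = []
        group_update_list = []
        for group in groups or []:
            if group.is_replicated:
                # Illustrative only: a real driver would fail the group over
                # here and report whatever per-group updates its backend needs.
                group_update_list.append(
                    {'group_id': group.id,
                     'updates': {'replication_status': 'failed-over'}})
        for volume in volumes:
            volume_update_list.append(
                {'volume_id': volume['id'],
                 'updates': {'replication_status': 'failed-over'}})
        # Callers now unpack three values:
        #   backend_id, volume_updates, group_updates = driver.failover_host(
        #       context, volumes, secondary_id, groups)
        return secondary_id, volume_update_list, group_update_list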
@@ -902,7 +902,7 @@ class API(base.Base):
         gsnapshot.save()
 
     def _check_type(self, group):
-        if not vol_utils.is_group_a_replication_group_type(group):
+        if not group.is_replicated:
             msg = _("Group %s is not a replication group type.") % group.id
             LOG.error(msg)
             raise exception.InvalidGroupType(reason=msg)
@@ -21,6 +21,7 @@ from cinder.i18n import _
 from cinder import objects
 from cinder.objects import base
 from cinder.objects import fields as c_fields
+from cinder.volume import utils as vol_utils
 
 
 @base.CinderObjectRegistry.register
@@ -177,6 +178,14 @@ class Group(base.CinderPersistentObject, base.CinderObject,
         with self.obj_as_admin():
             db.group_destroy(self._context, self.id)
 
+    @property
+    def is_replicated(self):
+        if (vol_utils.is_group_a_type(self, "group_replication_enabled") or
+                vol_utils.is_group_a_type(
+                    self, "consistent_group_replication_enabled")):
+            return True
+        return False
+
 
 @base.CinderObjectRegistry.register
 class GroupList(base.ObjectListBase, base.CinderObject):
@@ -207,3 +216,18 @@ class GroupList(base.ObjectListBase, base.CinderObject):
         return base.obj_make_list(context, cls(context),
                                   objects.Group,
                                   groups)
+
+    @classmethod
+    def get_all_replicated(cls, context, filters=None, marker=None, limit=None,
+                           offset=None, sort_keys=None, sort_dirs=None):
+        groups = db.group_get_all(
+            context, filters=filters, marker=marker, limit=limit,
+            offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs)
+        grp_obj_list = base.obj_make_list(context, cls(context),
+                                          objects.Group,
+                                          groups)
+
+        out_groups = [grp for grp in grp_obj_list
+                      if grp.is_replicated]
+
+        return out_groups
@@ -189,7 +189,7 @@ class FakeLoggingVolumeDriver(lvm.LVMVolumeDriver):
         model_update = super(FakeLoggingVolumeDriver, self).create_group(
             context, group)
         try:
-            if vol_utils.is_group_a_replication_group_type(group):
+            if group.is_replicated:
                 # Sets the new group's replication_status to disabled
                 model_update['replication_status'] = (
                     fields.ReplicationStatus.DISABLED)
@@ -1057,7 +1057,7 @@ class GroupsAPITestCase(test.TestCase):
 
     @mock.patch('cinder.volume.utils.is_replicated_spec',
                 return_value=True)
-    @mock.patch('cinder.volume.utils.is_group_a_replication_group_type',
+    @mock.patch('cinder.volume.utils.is_group_a_type',
                 return_value=True)
     def test_enable_replication(self, mock_rep_grp_type, mock_rep_vol_type):
         req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
@@ -1078,7 +1078,7 @@ class GroupsAPITestCase(test.TestCase):
     @ddt.data((True, False), (False, True), (False, False))
     @ddt.unpack
     @mock.patch('cinder.volume.utils.is_replicated_spec')
-    @mock.patch('cinder.volume.utils.is_group_a_replication_group_type')
+    @mock.patch('cinder.volume.utils.is_group_a_type')
     def test_enable_replication_wrong_type(self, is_grp_rep_type,
                                            is_vol_rep_type,
                                            mock_rep_grp_type,
@@ -1097,7 +1097,7 @@ class GroupsAPITestCase(test.TestCase):
 
     @mock.patch('cinder.volume.utils.is_replicated_spec',
                 return_value=False)
-    @mock.patch('cinder.volume.utils.is_group_a_replication_group_type',
+    @mock.patch('cinder.volume.utils.is_group_a_type',
                 return_value=True)
     def test_enable_replication_wrong_group_type(self, mock_rep_grp_type,
                                                  mock_rep_vol_type):
@@ -1113,7 +1113,7 @@ class GroupsAPITestCase(test.TestCase):
 
     @mock.patch('cinder.volume.utils.is_replicated_spec',
                 return_value=True)
-    @mock.patch('cinder.volume.utils.is_group_a_replication_group_type',
+    @mock.patch('cinder.volume.utils.is_group_a_type',
                 return_value=True)
     @ddt.data((GROUP_REPLICATION_MICRO_VERSION, True,
                fields.GroupStatus.CREATING,
@@ -1146,7 +1146,7 @@ class GroupsAPITestCase(test.TestCase):
 
     @mock.patch('cinder.volume.utils.is_replicated_spec',
                 return_value=True)
-    @mock.patch('cinder.volume.utils.is_group_a_replication_group_type',
+    @mock.patch('cinder.volume.utils.is_group_a_type',
                 return_value=True)
     def test_disable_replication(self, mock_rep_grp_type, mock_rep_vol_type):
         req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
@@ -1167,7 +1167,7 @@ class GroupsAPITestCase(test.TestCase):
 
     @mock.patch('cinder.volume.utils.is_replicated_spec',
                 return_value=True)
-    @mock.patch('cinder.volume.utils.is_group_a_replication_group_type',
+    @mock.patch('cinder.volume.utils.is_group_a_type',
                 return_value=True)
     @ddt.data((GROUP_REPLICATION_MICRO_VERSION, True,
                fields.GroupStatus.CREATING,
@@ -1209,7 +1209,7 @@ class GroupsAPITestCase(test.TestCase):
 
     @mock.patch('cinder.volume.utils.is_replicated_spec',
                 return_value=True)
-    @mock.patch('cinder.volume.utils.is_group_a_replication_group_type',
+    @mock.patch('cinder.volume.utils.is_group_a_type',
                 return_value=True)
     def test_failover_replication(self, mock_rep_grp_type, mock_rep_vol_type):
         req = fakes.HTTPRequest.blank('/v3/%s/groups/%s/action' %
@@ -1230,7 +1230,7 @@ class GroupsAPITestCase(test.TestCase):
 
     @mock.patch('cinder.volume.utils.is_replicated_spec',
                 return_value=True)
-    @mock.patch('cinder.volume.utils.is_group_a_replication_group_type',
+    @mock.patch('cinder.volume.utils.is_group_a_type',
                 return_value=True)
     @ddt.data((GROUP_REPLICATION_MICRO_VERSION, True,
                fields.GroupStatus.CREATING,
@@ -1272,7 +1272,7 @@ class GroupsAPITestCase(test.TestCase):
 
     @mock.patch('cinder.volume.utils.is_replicated_spec',
                 return_value=True)
-    @mock.patch('cinder.volume.utils.is_group_a_replication_group_type',
+    @mock.patch('cinder.volume.utils.is_group_a_type',
                 return_value=True)
     @mock.patch('cinder.volume.rpcapi.VolumeAPI.list_replication_targets')
     def test_list_replication_targets(self, mock_list_rep_targets,
@@ -179,7 +179,32 @@ class TestGroup(test_objects.BaseObjectsTestCase):
         self.assertEqual(is_set, converted_group.obj_attr_is_set(key))
         self.assertEqual('name', converted_group.name)
 
+    @mock.patch('cinder.volume.group_types.get_group_type_specs')
+    def test_is_replicated_true(self, mock_get_specs):
+        mock_get_specs.return_value = '<is> True'
+        group = objects.Group(self.context, group_type_id=fake.GROUP_TYPE_ID)
+        # NOTE(xyang): Changed the following from self.assertTrue(
+        # group.is_replicated) to self.assertEqual(True, group.is_replicated)
+        # to address a review comment. This way this test will still pass
+        # even if is_replicated is a method and not a property.
+        self.assertEqual(True, group.is_replicated)
+
+    @ddt.data('<is> False', None, 'notASpecValueWeCareAbout')
+    def test_is_replicated_false(self, spec_value):
+        with mock.patch('cinder.volume.group_types'
+                        '.get_group_type_specs') as mock_get_specs:
+            mock_get_specs.return_value = spec_value
+            group = objects.Group(self.context,
+                                  group_type_id=fake.GROUP_TYPE_ID)
+            # NOTE(xyang): Changed the following from self.assertFalse(
+            # group.is_replicated) to self.assertEqual(False,
+            # group.is_replicated) to address a review comment. This way this
+            # test will still pass even if is_replicated is a method and not
+            # a property.
+            self.assertEqual(False, group.is_replicated)
+
 
+@ddt.ddt
 class TestGroupList(test_objects.BaseObjectsTestCase):
     @mock.patch('cinder.db.group_get_all',
                 return_value=[fake_group])
@@ -224,3 +249,26 @@ class TestGroupList(test_objects.BaseObjectsTestCase):
             limit=1, offset=None, sort_keys='id', sort_dirs='asc')
         TestGroup._compare(self, fake_group,
                            groups[0])
+
+    @ddt.data({'cluster_name': 'fake_cluster'}, {'host': 'fake_host'})
+    @mock.patch('cinder.volume.group_types.get_group_type_specs')
+    @mock.patch('cinder.db.group_get_all')
+    def test_get_all_replicated(self, filters, mock_get_groups,
+                                mock_get_specs):
+        mock_get_specs.return_value = '<is> True'
+        fake_group2 = fake_group.copy()
+        fake_group2['id'] = fake.GROUP2_ID
+        fake_group2['cluster_name'] = 'fake_cluster'
+        if filters.get('cluster_name'):
+            mock_get_groups.return_value = [fake_group2]
+        else:
+            mock_get_groups.return_value = [fake_group]
+        res = objects.GroupList.get_all_replicated(self.context,
+                                                   filters=filters)
+        self.assertEqual(1, len(res))
+        if filters.get('cluster_name'):
+            self.assertEqual(fake.GROUP2_ID, res[0].id)
+            self.assertEqual('fake_cluster', res[0].cluster_name)
+        else:
+            self.assertEqual(fake.GROUP_ID, res[0].id)
+            self.assertIsNone(res[0].cluster_name)
@@ -3161,8 +3161,8 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                                   {'volume_id': fake.VOLUME2_ID, 'updates':
                                       {'replication_status': 'failed-over',
                                        'provider_id': '2.2'}}]
-        destssn, volume_update = self.driver.failover_host(
-            {}, volumes, '12345')
+        destssn, volume_update, __ = self.driver.failover_host(
+            {}, volumes, '12345', [])
         self.assertEqual(expected_destssn, destssn)
         self.assertEqual(expected_volume_update, volume_update)
         # Good run. Not all volumes replicated.
@@ -3175,8 +3175,8 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                                       {'status': 'error'}}]
         self.driver.failed_over = False
         self.driver.active_backend_id = None
-        destssn, volume_update = self.driver.failover_host(
-            {}, volumes, '12345')
+        destssn, volume_update, __ = self.driver.failover_host(
+            {}, volumes, '12345', [])
         self.assertEqual(expected_destssn, destssn)
         self.assertEqual(expected_volume_update, volume_update)
         # Good run. Not all volumes replicated. No replication_driver_data.
@@ -3189,8 +3189,8 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                                       {'status': 'error'}}]
        self.driver.failed_over = False
        self.driver.active_backend_id = None
-        destssn, volume_update = self.driver.failover_host(
-            {}, volumes, '12345')
+        destssn, volume_update, __ = self.driver.failover_host(
+            {}, volumes, '12345', [])
         self.assertEqual(expected_destssn, destssn)
         self.assertEqual(expected_volume_update, volume_update)
         # Good run. No volumes replicated. No replication_driver_data.
@@ -3202,8 +3202,8 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                                       {'status': 'error'}}]
         self.driver.failed_over = False
         self.driver.active_backend_id = None
-        destssn, volume_update = self.driver.failover_host(
-            {}, volumes, '12345')
+        destssn, volume_update, __ = self.driver.failover_host(
+            {}, volumes, '12345', [])
         self.assertEqual(expected_destssn, destssn)
         self.assertEqual(expected_volume_update, volume_update)
         # Secondary not found.
@@ -3214,14 +3214,15 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                           self.driver.failover_host,
                           {},
                           volumes,
-                          '54321')
+                          '54321',
+                          [])
         # Already failed over.
         self.driver.failed_over = True
         self.driver.failover_host({}, volumes, 'default')
         mock_failback_volumes.assert_called_once_with(volumes)
         # Already failed over.
         self.assertRaises(exception.InvalidReplicationTarget,
-                          self.driver.failover_host, {}, volumes, '67890')
+                          self.driver.failover_host, {}, volumes, '67890', [])
         self.driver.replication_enabled = False
 
     @mock.patch.object(storagecenter_iscsi.SCISCSIDriver,
@@ -3279,8 +3280,8 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                                   {'volume_id': fake.VOLUME2_ID, 'updates':
                                       {'replication_status': 'failed-over',
                                        'provider_id': '2.2'}}]
-        destssn, volume_update = self.driver.failover_host(
-            {}, volumes, '12345')
+        destssn, volume_update, __ = self.driver.failover_host(
+            {}, volumes, '12345', [])
         self.assertEqual(expected_destssn, destssn)
         self.assertEqual(expected_volume_update, volume_update)
         # Good run. Not all volumes replicated.
@@ -3293,8 +3294,8 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                                       {'status': 'error'}}]
         self.driver.failed_over = False
         self.driver.active_backend_id = None
-        destssn, volume_update = self.driver.failover_host(
-            {}, volumes, '12345')
+        destssn, volume_update, __ = self.driver.failover_host(
+            {}, volumes, '12345', [])
         self.assertEqual(expected_destssn, destssn)
         self.assertEqual(expected_volume_update, volume_update)
         # Good run. Not all volumes replicated. No replication_driver_data.
@@ -3307,8 +3308,8 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                                       {'status': 'error'}}]
         self.driver.failed_over = False
         self.driver.active_backend_id = None
-        destssn, volume_update = self.driver.failover_host(
-            {}, volumes, '12345')
+        destssn, volume_update, __ = self.driver.failover_host(
+            {}, volumes, '12345', [])
         self.assertEqual(expected_destssn, destssn)
         self.assertEqual(expected_volume_update, volume_update)
         # Good run. No volumes replicated. No replication_driver_data.
@@ -3320,8 +3321,8 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                                       {'status': 'error'}}]
         self.driver.failed_over = False
         self.driver.active_backend_id = None
-        destssn, volume_update = self.driver.failover_host(
-            {}, volumes, '12345')
+        destssn, volume_update, __ = self.driver.failover_host(
+            {}, volumes, '12345', [])
         self.assertEqual(expected_destssn, destssn)
         self.assertEqual(expected_volume_update, volume_update)
         # Secondary not found.
@@ -3332,7 +3333,8 @@ class DellSCSanISCSIDriverTestCase(test.TestCase):
                           self.driver.failover_host,
                           {},
                           volumes,
-                          '54321')
+                          '54321',
+                          [])
         # Already failed over.
         self.driver.failed_over = True
         self.driver.failover_host({}, volumes, 'default')
@@ -1165,8 +1165,8 @@ class TestCommonAdapter(test.TestCase):
            fake_mirror.secondary_client.get_serial.return_value = (
                device['backend_id'])
            fake.return_value = fake_mirror
-            backend_id, updates = common_adapter.failover_host(
-                None, [vol1], device['backend_id'])
+            backend_id, updates, __ = common_adapter.failover_host(
+                None, [vol1], device['backend_id'], [])
            fake_mirror.promote_image.assert_called_once_with(
                'mirror_' + vol1.id)
            fake_mirror.secondary_client.get_serial.assert_called_with()
@@ -1205,8 +1205,8 @@ class TestCommonAdapter(test.TestCase):
            fake_mirror.secondary_client.get_serial.return_value = (
                device['backend_id'])
            fake.return_value = fake_mirror
-            backend_id, updates = common_adapter.failover_host(
-                None, [vol1], 'default')
+            backend_id, updates, __ = common_adapter.failover_host(
+                None, [vol1], 'default', [])
            fake_mirror.promote_image.assert_called_once_with(
                'mirror_' + vol1.id)
            fake_mirror.secondary_client.get_serial.assert_called_with()
@@ -5116,7 +5116,8 @@ class HPE3PARBaseDriver(object):
         expected_model = (self.REPLICATION_BACKEND_ID,
                           [{'updates': {'replication_status':
                                         'failed-over'},
-                            'volume_id': self.VOLUME_ID}])
+                            'volume_id': self.VOLUME_ID}],
+                          [])
         return_model = self.driver.failover_host(
             context.get_admin_context(),
             volumes,
@@ -5173,7 +5174,8 @@ class HPE3PARBaseDriver(object):
         expected_model = (None,
                           [{'updates': {'replication_status':
                                         'available'},
-                            'volume_id': self.VOLUME_ID}])
+                            'volume_id': self.VOLUME_ID}],
+                          [])
         self.assertEqual(expected_model, return_model)
 
     @mock.patch.object(volume_types, 'get_volume_type')
@@ -3067,7 +3067,8 @@ class TestHPELeftHandISCSIDriver(HPELeftHandBaseDriver, test.TestCase):
                                         'failed-over',
                                     'provider_location':
                                         prov_location},
-                           'volume_id': 1}])
+                           'volume_id': 1}],
+                          [])
         self.assertEqual(expected_model, return_model)
 
     @mock.patch.object(volume_types, 'get_volume_type')
@@ -3164,7 +3165,8 @@ class TestHPELeftHandISCSIDriver(HPELeftHandBaseDriver, test.TestCase):
                                         'available',
                                     'provider_location':
                                         prov_location},
-                           'volume_id': 1}])
+                           'volume_id': 1}],
+                          [])
         self.assertEqual(expected_model, return_model)
 
     @mock.patch.object(volume_types, 'get_volume_type')
@@ -3962,8 +3962,8 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
         old_client = driver.client
         old_replica_client = driver.replica_client
         old_replica = driver.replica
-        secondary_id, volumes_update = driver.failover_host(
-            None, [self.volume], 'default')
+        secondary_id, volumes_update, __ = driver.failover_host(
+            None, [self.volume], 'default', [])
         self.assertIn(driver.active_backend_id, ('', None))
         self.assertEqual(old_client, driver.client)
         self.assertEqual(old_replica_client, driver.replica_client)
@@ -3977,8 +3977,8 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
         old_client = driver.client
         old_replica_client = driver.replica_client
         old_replica = driver.replica
-        secondary_id, volumes_update = driver.failover_host(
-            None, [self.volume], REPLICA_BACKEND_ID)
+        secondary_id, volumes_update, __ = driver.failover_host(
+            None, [self.volume], REPLICA_BACKEND_ID, [])
         self.assertEqual(REPLICA_BACKEND_ID, driver.active_backend_id)
         self.assertEqual(old_client, driver.replica_client)
         self.assertEqual(old_replica_client, driver.client)
@@ -3999,8 +3999,8 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
         old_client = driver.client
         old_replica_client = driver.replica_client
         old_replica = driver.replica
-        secondary_id, volumes_update = driver.failover_host(
-            None, [self.volume], REPLICA_BACKEND_ID)
+        secondary_id, volumes_update, __ = driver.failover_host(
+            None, [self.volume], REPLICA_BACKEND_ID, [])
         self.assertEqual(REPLICA_BACKEND_ID, driver.active_backend_id)
         self.assertEqual(old_client, driver.client)
         self.assertEqual(old_replica_client, driver.replica_client)
@@ -4018,8 +4018,8 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
         old_client = driver.client
         old_replica_client = driver.replica_client
         old_replica = driver.replica
-        secondary_id, volumes_update = driver.failover_host(
-            None, [self.volume], 'default')
+        secondary_id, volumes_update, __ = driver.failover_host(
+            None, [self.volume], 'default', [])
         self.assertIn(driver.active_backend_id, ('', None))
         self.assertEqual(old_client, driver.replica_client)
         self.assertEqual(old_replica_client, driver.client)
@@ -4041,8 +4041,8 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
         self.mock_object(replication.ReplicaCommonDriver, 'failover')
         self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params',
                          return_value={'replication_enabled': 'true'})
-        secondary_id, volumes_update = driver.failover_host(
-            None, [self.replica_volume], REPLICA_BACKEND_ID)
+        secondary_id, volumes_update, __ = driver.failover_host(
+            None, [self.replica_volume], REPLICA_BACKEND_ID, [])
         self.assertEqual(REPLICA_BACKEND_ID, driver.active_backend_id)
         self.assertEqual(old_client, driver.replica_client)
         self.assertEqual(old_replica_client, driver.client)
@@ -4071,8 +4071,8 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
         old_replica = driver.replica
         self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params',
                          return_value={'replication_enabled': 'true'})
-        secondary_id, volumes_update = driver.failover_host(
-            None, [volume], REPLICA_BACKEND_ID)
+        secondary_id, volumes_update, __ = driver.failover_host(
+            None, [volume], REPLICA_BACKEND_ID, [])
         self.assertEqual(driver.active_backend_id, REPLICA_BACKEND_ID)
         self.assertEqual(old_client, driver.replica_client)
         self.assertEqual(old_replica_client, driver.client)
@@ -4099,8 +4099,8 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
         old_client = driver.client
         old_replica_client = driver.replica_client
         old_replica = driver.replica
-        secondary_id, volumes_update = driver.failover_host(
-            None, [volume], 'default')
+        secondary_id, volumes_update, __ = driver.failover_host(
+            None, [volume], 'default', [])
         self.assertIn(driver.active_backend_id, ('', None))
         self.assertEqual(old_client, driver.replica_client)
         self.assertEqual(old_replica_client, driver.client)
@@ -4132,8 +4132,8 @@ class HuaweiISCSIDriverTestCase(HuaweiTestBase):
         old_client = driver.client
         old_replica_client = driver.replica_client
         old_replica = driver.replica
-        secondary_id, volumes_update = driver.failover_host(
-            None, [volume], 'default')
+        secondary_id, volumes_update, __ = driver.failover_host(
+            None, [volume], 'default', [])
         self.assertIn(driver.active_backend_id, ('', None))
         self.assertEqual(old_client, driver.replica_client)
         self.assertEqual(old_replica_client, driver.client)
@@ -2901,8 +2901,8 @@ class DS8KProxyTest(test.TestCase):
         pprc_pairs = copy.deepcopy(FAKE_GET_PPRCS_RESPONSE['data']['pprcs'])
         pprc_pairs[0]['state'] = 'suspended'
         mock_get_pprc_pairs.side_effect = [pprc_pairs]
-        secondary_id, volume_update_list = self.driver.failover_host(
-            self.ctxt, [volume], TEST_TARGET_DS8K_IP)
+        secondary_id, volume_update_list, __ = self.driver.failover_host(
+            self.ctxt, [volume], TEST_TARGET_DS8K_IP, [])
         self.assertEqual(TEST_TARGET_DS8K_IP, secondary_id)
 
     @mock.patch.object(replication.Replication, 'do_pprc_failover')
@@ -2928,7 +2928,7 @@ class DS8KProxyTest(test.TestCase):
             restclient.APIException('failed to do failover.'))
         self.assertRaises(exception.UnableToFailOver,
                           self.driver.failover_host, self.ctxt,
-                          [volume], TEST_TARGET_DS8K_IP)
+                          [volume], TEST_TARGET_DS8K_IP, [])
 
     def test_failover_host_to_invalid_target(self):
         """Failover host to invalid secondary should fail."""
@@ -2947,7 +2947,7 @@ class DS8KProxyTest(test.TestCase):
                                      replication_driver_data=data)
         self.assertRaises(exception.InvalidReplicationTarget,
                           self.driver.failover_host, self.ctxt,
-                          [volume], 'fake_target')
+                          [volume], 'fake_target', [])
 
     def test_failover_host_that_has_been_failed_over(self):
         """Failover host that has been failed over should just return."""
@@ -2964,8 +2964,8 @@ class DS8KProxyTest(test.TestCase):
         volume = self._create_volume(volume_type_id=vol_type.id,
                                      provider_location=location,
                                      replication_driver_data=data)
-        secondary_id, volume_update_list = self.driver.failover_host(
-            self.ctxt, [volume], TEST_TARGET_DS8K_IP)
+        secondary_id, volume_update_list, __ = self.driver.failover_host(
+            self.ctxt, [volume], TEST_TARGET_DS8K_IP, [])
         self.assertEqual(TEST_TARGET_DS8K_IP, secondary_id)
         self.assertEqual([], volume_update_list)
 
@@ -2984,8 +2984,8 @@ class DS8KProxyTest(test.TestCase):
         volume = self._create_volume(volume_type_id=vol_type.id,
                                      provider_location=location,
                                      replication_driver_data=data)
-        secondary_id, volume_update_list = self.driver.failover_host(
-            self.ctxt, [volume], 'default')
+        secondary_id, volume_update_list, __ = self.driver.failover_host(
+            self.ctxt, [volume], 'default', [])
         self.assertIsNone(secondary_id)
         self.assertEqual([], volume_update_list)
 
@@ -3000,8 +3000,8 @@ class DS8KProxyTest(test.TestCase):
         location = six.text_type({'vol_hex_id': TEST_VOLUME_ID})
         volume = self._create_volume(volume_type_id=vol_type.id,
                                      provider_location=location)
-        secondary_id, volume_update_list = self.driver.failover_host(
-            self.ctxt, [volume], TEST_TARGET_DS8K_IP)
+        secondary_id, volume_update_list, __ = self.driver.failover_host(
+            self.ctxt, [volume], TEST_TARGET_DS8K_IP, [])
         self.assertEqual(TEST_TARGET_DS8K_IP, secondary_id)
         self.assertEqual('error', volume_update_list[0]['updates']['status'])
 
@@ -3019,8 +3019,8 @@ class DS8KProxyTest(test.TestCase):
         })
         volume = self._create_volume(volume_type_id=vol_type.id,
                                      provider_location=location)
-        secondary_id, volume_update_list = self.driver.failover_host(
-            self.ctxt, [volume], 'default')
+        secondary_id, volume_update_list, __ = self.driver.failover_host(
+            self.ctxt, [volume], 'default', [])
         self.assertEqual('default', secondary_id)
         self.assertEqual('available',
                          volume_update_list[0]['updates']['status'])
@@ -3050,8 +3050,8 @@ class DS8KProxyTest(test.TestCase):
         mock_get_pprc_pairs.side_effect = [pprc_pairs_full_duplex,
                                            pprc_pairs_suspended,
                                            pprc_pairs_full_duplex]
-        secondary_id, volume_update_list = self.driver.failover_host(
-            self.ctxt, [volume], 'default')
+        secondary_id, volume_update_list, __ = self.driver.failover_host(
+            self.ctxt, [volume], 'default', [])
         self.assertEqual('default', secondary_id)
 
     @mock.patch.object(replication.Replication, 'start_pprc_failback')
@@ -3074,4 +3074,4 @@ class DS8KProxyTest(test.TestCase):
             restclient.APIException('failed to do failback.'))
         self.assertRaises(exception.UnableToFailOver,
                           self.driver.failover_host, self.ctxt,
-                          [volume], 'default')
+                          [volume], 'default', [])
@@ -268,7 +268,7 @@ class IBMStorageFakeProxyDriver(object):
     def thaw_backend(self, context):
         return True
 
-    def failover_host(self, context, volumes, secondary_id):
+    def failover_host(self, context, volumes, secondary_id, groups=None):
         target_id = 'BLA'
         volume_update_list = []
         for volume in volumes:
@@ -279,7 +279,7 @@ class IBMStorageFakeProxyDriver(object):
                 {'volume_id': volume['id'],
                  'updates': {'replication_status': status}})
 
-        return target_id, volume_update_list
+        return target_id, volume_update_list, []
 
     def enable_replication(self, context, group, volumes):
         vol_status = []
@@ -916,10 +916,11 @@ class IBMStorageVolumeDriverTest(test.TestCase):
             {'volume_id': REPLICATED_VOLUME['id'],
              'updates': {'replication_status': 'failed-over'}}]
 
-        target_id, volume_update_list = self.driver.failover_host(
+        target_id, volume_update_list, __ = self.driver.failover_host(
             CONTEXT,
             [replicated_volume],
-            SECONDARY
+            SECONDARY,
+            []
         )
 
         self.assertEqual(expected_target_id, target_id)
@@ -939,10 +940,11 @@ class IBMStorageVolumeDriverTest(test.TestCase):
             {'volume_id': REPLICATED_VOLUME['id'],
              'updates': {'replication_status': 'error'}}]
 
-        target_id, volume_update_list = self.driver.failover_host(
+        target_id, volume_update_list, __ = self.driver.failover_host(
            CONTEXT,
            [replicated_volume],
-            SECONDARY
+            SECONDARY,
+            []
        )
 
        self.assertEqual(expected_target_id, target_id)
@@ -6987,7 +6987,7 @@ class StorwizeSVCReplicationTestCase(test.TestCase):
         volumes = [volume, non_replica_vol, gmcv_volume]
         # Delete volume in failover state
         self.driver.failover_host(
-            self.ctxt, volumes, self.rep_target['backend_id'])
+            self.ctxt, volumes, self.rep_target['backend_id'], [])
         # Delete non-replicate volume in a failover state
         self.assertRaises(exception.VolumeDriverException,
                           self.driver.delete_volume,
@@ -7001,7 +7001,7 @@ class StorwizeSVCReplicationTestCase(test.TestCase):
         self._validate_replic_vol_deletion(gmcv_volume, True)
 
         self.driver.failover_host(
-            self.ctxt, volumes, 'default')
+            self.ctxt, volumes, 'default', [])
         self.driver.delete_volume(non_replica_vol)
         self._assert_vol_exists(non_replica_vol['name'], False)
 
@@ -7092,11 +7092,13 @@ class StorwizeSVCReplicationTestCase(test.TestCase):
         self.driver._replica_enabled = False
         self.assertRaises(exception.UnableToFailOver,
                           self.driver.failover_host,
-                          self.ctxt, volumes, self.rep_target['backend_id'])
+                          self.ctxt, volumes, self.rep_target['backend_id'],
+                          [])
         self.driver._replica_enabled = True
         self.assertRaises(exception.InvalidReplicationTarget,
                           self.driver.failover_host,
-                          self.ctxt, volumes, self.fake_target['backend_id'])
+                          self.ctxt, volumes, self.fake_target['backend_id'],
+                          [])
 
         with mock.patch.object(storwize_svc_common.StorwizeHelpers,
                                'get_system_info') as get_sys_info:
@@ -7106,12 +7108,12 @@ class StorwizeSVCReplicationTestCase(test.TestCase):
             self.assertRaises(exception.UnableToFailOver,
                               self.driver.failover_host,
                               self.ctxt, volumes,
-                              self.rep_target['backend_id'])
+                              self.rep_target['backend_id'], [])
 
             self.driver._active_backend_id = self.rep_target['backend_id']
             self.assertRaises(exception.UnableToFailOver,
                               self.driver.failover_host,
-                              self.ctxt, volumes, 'default')
+                              self.ctxt, volumes, 'default', [])
         self.driver.delete_volume(mm_vol)
         self.driver.delete_volume(gmcv_vol)
 
@@ -7189,8 +7191,8 @@ class StorwizeSVCReplicationTestCase(test.TestCase):
             {'replication_status': fields.ReplicationStatus.FAILED_OVER},
             'volume_id': gmcv_vol['id']}]
 
-        target_id, volume_list = self.driver.failover_host(
-            self.ctxt, volumes, self.rep_target['backend_id'])
+        target_id, volume_list, __ = self.driver.failover_host(
+            self.ctxt, volumes, self.rep_target['backend_id'], [])
         self.assertEqual(self.rep_target['backend_id'], target_id)
         self.assertEqual(expected_list, volume_list)
 
@@ -7206,8 +7208,8 @@ class StorwizeSVCReplicationTestCase(test.TestCase):
         self.driver.delete_volume(gm_vol)
         self.driver.delete_volume(gmcv_vol)
 
-        target_id, volume_list = self.driver.failover_host(
-            self.ctxt, volumes, None)
+        target_id, volume_list, __ = self.driver.failover_host(
+            self.ctxt, volumes, None, [])
         self.assertEqual(self.rep_target['backend_id'], target_id)
         self.assertEqual([], volume_list)
 
@@ -7258,8 +7260,8 @@ class StorwizeSVCReplicationTestCase(test.TestCase):
                           'volume_id': non_replica_vol['id']},
                          ]
 
-        target_id, volume_list = self.driver.failover_host(
-            self.ctxt, volumes, self.rep_target['backend_id'])
+        target_id, volume_list, __ = self.driver.failover_host(
+            self.ctxt, volumes, self.rep_target['backend_id'], [])
         self.assertEqual(self.rep_target['backend_id'], target_id)
         self.assertEqual(expected_list, volume_list)
 
@@ -7271,15 +7273,15 @@ class StorwizeSVCReplicationTestCase(test.TestCase):
         self.assertTrue(update_storwize_state.called)
         self.assertTrue(update_volume_stats.called)
 
-        target_id, volume_list = self.driver.failover_host(
-            self.ctxt, volumes, None)
+        target_id, volume_list, __ = self.driver.failover_host(
+            self.ctxt, volumes, None, [])
         self.assertEqual(self.rep_target['backend_id'], target_id)
         self.assertEqual([], volume_list)
         # Delete non-replicate volume in a failover state
         self.assertRaises(exception.VolumeDriverException,
                           self.driver.delete_volume,
                           non_replica_vol)
-        self.driver.failover_host(self.ctxt, volumes, 'default')
+        self.driver.failover_host(self.ctxt, volumes, 'default', [])
         self.driver.delete_volume(mm_vol)
         self.driver.delete_volume(gmcv_vol)
         self.driver.delete_volume(non_replica_vol)
@@ -7360,22 +7362,22 @@ class StorwizeSVCReplicationTestCase(test.TestCase):
                              'status': 'available'},
                             'volume_id': gmcv_vol['id']}]
         # Already failback
-        target_id, volume_list = self.driver.failover_host(
-            self.ctxt, volumes, 'default')
+        target_id, volume_list, __ = self.driver.failover_host(
+            self.ctxt, volumes, 'default', [])
         self.assertIsNone(target_id)
         self.assertEqual([], volume_list)
 
         # fail over operation
-        target_id, volume_list = self.driver.failover_host(
-            self.ctxt, volumes, self.rep_target['backend_id'])
+        target_id, volume_list, __ = self.driver.failover_host(
+            self.ctxt, volumes, self.rep_target['backend_id'], [])
         self.assertEqual(self.rep_target['backend_id'], target_id)
         self.assertEqual(failover_expect, volume_list)
         self.assertTrue(update_storwize_state.called)
         self.assertTrue(update_volume_stats.called)
 
         # fail back operation
-        target_id, volume_list = self.driver.failover_host(
-            self.ctxt, volumes, 'default')
+        target_id, volume_list, __ = self.driver.failover_host(
+            self.ctxt, volumes, 'default', [])
         self.assertEqual('default', target_id)
         self.assertEqual(failback_expect, volume_list)
         self.assertIsNone(self.driver._active_backend_id)
@@ -7450,14 +7452,14 @@ class StorwizeSVCReplicationTestCase(test.TestCase):
                             'volume_id': non_replica_vol2['id']}]
 
         # Already failback
-        target_id, volume_list = self.driver.failover_host(
-            self.ctxt, volumes, 'default')
+        target_id, volume_list, __ = self.driver.failover_host(
+            self.ctxt, volumes, 'default', [])
         self.assertIsNone(target_id)
         self.assertEqual([], volume_list)
 
         # fail over operation
-        target_id, volume_list = self.driver.failover_host(
-            self.ctxt, volumes, self.rep_target['backend_id'])
+        target_id, volume_list, __ = self.driver.failover_host(
+            self.ctxt, volumes, self.rep_target['backend_id'], [])
         self.assertEqual(self.rep_target['backend_id'], target_id)
         self.assertEqual(failover_expect, volume_list)
         self.assertTrue(update_storwize_state.called)
@@ -7489,8 +7491,8 @@ class StorwizeSVCReplicationTestCase(test.TestCase):
                            {'updates': {'status': 'error',
                                         'replication_driver_data': rep_data4},
                             'volume_id': gm_vol['id']}]
-        target_id, volume_list = self.driver.failover_host(
-            self.ctxt, volumes, 'default')
+        target_id, volume_list, __ = self.driver.failover_host(
+            self.ctxt, volumes, 'default', [])
         self.assertEqual('default', target_id)
         self.assertEqual(failback_expect, volume_list)
         self.assertIsNone(self.driver._active_backend_id)
@@ -542,7 +542,7 @@ class XIVProxyTest(test.TestCase):
         volume = {'id': 'WTF64', 'size': 16,
                   'name': 'WTF32', 'volume_type_id': 'WTF'}
         target = REPLICA_ID
-        p.failover_host({}, [volume], target)
+        p.failover_host({}, [volume], target, [])
 
     def test_failover_host_invalid_target(self):
         """Test failover_host with invalid target"""
@@ -559,7 +559,7 @@ class XIVProxyTest(test.TestCase):
                   'name': 'WTF32', 'volume_type_id': 'WTF'}
         target = 'Invalid'
         ex = getattr(p, "_get_exception")()
-        self.assertRaises(ex, p.failover_host, {}, [volume], target)
+        self.assertRaises(ex, p.failover_host, {}, [volume], target, [])
 
     @mock.patch("cinder.volume.drivers.ibm.ibm_storage."
                 "xiv_proxy.client.XCLIClient")
@@ -585,7 +585,7 @@ class XIVProxyTest(test.TestCase):
                   'name': 'WTF32', 'volume_type_id': 'WTF'}
         target = REPLICA_ID
         ex = getattr(p, "_get_exception")()
-        self.assertRaises(ex, p.failover_host, {}, [volume], target)
+        self.assertRaises(ex, p.failover_host, {}, [volume], target, [])
 
     @mock.patch("cinder.volume.drivers.ibm.ibm_storage."
                 "xiv_proxy.client.XCLIClient")
@@ -606,7 +606,7 @@ class XIVProxyTest(test.TestCase):
         volume = {'id': 'WTF64', 'size': 16,
                   'name': 'WTF32', 'volume_type_id': 'WTF'}
         target = 'default'
-        p.failover_host(None, [volume], target)
+        p.failover_host(None, [volume], target, [])
 
     def qos_test_empty_name_if_no_specs(self):
         """Test empty name in case no specs are specified"""
@@ -710,8 +710,8 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
                          return_value=fake_utils.SSC.keys())
         self.mock_object(self.library, '_update_zapi_client')
 
-        actual_active, vol_updates = self.library.failover_host(
-            'fake_context', [], secondary_id='dev1')
+        actual_active, vol_updates, __ = self.library.failover_host(
+            'fake_context', [], secondary_id='dev1', groups=[])
 
         data_motion.DataMotionMixin._complete_failover.assert_called_once_with(
             'dev0', ['dev1', 'dev2'], fake_utils.SSC.keys(), [],
@@ -1409,8 +1409,8 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
                          return_value=fake_ssc.SSC.keys())
         self.mock_object(self.driver, '_update_zapi_client')
 
-        actual_active, vol_updates = self.driver.failover_host(
-            'fake_context', [], secondary_id='dev1')
+        actual_active, vol_updates, __ = self.driver.failover_host(
+            'fake_context', [], secondary_id='dev1', groups=[])
 
         data_motion.DataMotionMixin._complete_failover.assert_called_once_with(
             'dev0', ['dev1', 'dev2'], fake_ssc.SSC.keys(), [],
@ -400,14 +400,16 @@ class TestKaminarioISCSI(test.TestCase):
self.driver.target = FakeKrest()
self.driver.target.search().total = 1
self.driver.client.search().total = 1
- backend_ip, res_volumes = self.driver.failover_host(None, volumes)
+ backend_ip, res_volumes, __ = self.driver.failover_host(
+ None, volumes, [])
self.assertEqual('10.0.0.1', backend_ip)
status = res_volumes[0]['updates']['replication_status']
self.assertEqual(fields.ReplicationStatus.FAILED_OVER, status)
# different backend ip
self.driver.configuration.san_ip = '10.0.0.2'
self.driver.client.search().hits[0].state = 'in_sync'
- backend_ip, res_volumes = self.driver.failover_host(None, volumes)
+ backend_ip, res_volumes, __ = self.driver.failover_host(
+ None, volumes, [])
self.assertEqual('10.0.0.2', backend_ip)
status = res_volumes[0]['updates']['replication_status']
self.assertEqual(fields.ReplicationStatus.DISABLED, status)
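Note: the test updates above all exercise the same new calling convention: failover_host now takes a groups argument and returns a third element carrying group model updates, so callers unpack a three-tuple. A minimal, self-contained sketch of that convention (SketchDriver and its literal values are illustrative, not taken from any real backend):

    # Sketch only: shows the three-element return the tests above expect.
    class SketchDriver(object):
        def failover_host(self, context, volumes, secondary_id=None,
                          groups=None):
            volume_updates = [{'volume_id': v['id'],
                               'updates': {'replication_status':
                                           'failed-over'}}
                              for v in volumes]
            # Drivers that do not act on replication groups yet simply
            # return an empty group update list.
            return secondary_id, volume_updates, []

    active_id, vol_updates, grp_updates = SketchDriver().failover_host(
        None, [{'id': 'fake-vol-id'}], secondary_id='secondary', groups=[])
    assert grp_updates == []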
@ -2111,10 +2111,11 @@ class PureBaseVolumeDriverTestCase(PureBaseSharedDriverTestCase):
array2_v1_3.get_volume.return_value = REPLICATED_VOLUME_SNAPS

context = mock.MagicMock()
- new_active_id, volume_updates = self.driver.failover_host(
+ new_active_id, volume_updates, __ = self.driver.failover_host(
context,
REPLICATED_VOLUME_OBJS,
- None
+ None,
+ []
)

self.assertEqual(secondary_device_id, new_active_id)
@ -1555,7 +1555,7 @@ class RBDTestCase(test.TestCase):
self.driver._is_replication_enabled = False
self.assertRaises(exception.UnableToFailOver,
self.driver.failover_host,
- self.context, [self.volume_a])
+ self.context, [self.volume_a], [])

@ddt.data(None, 'tertiary-backend')
@common_mocks
@ -1572,9 +1572,10 @@ class RBDTestCase(test.TestCase):
remote = self.driver._replication_targets[1 if secondary_id else 0]
mock_get_cfg.return_value = (remote['name'], remote)

- res = self.driver.failover_host(self.context, volumes, secondary_id)
+ res = self.driver.failover_host(self.context, volumes, secondary_id,
+ [])

- self.assertEqual((remote['name'], volumes), res)
+ self.assertEqual((remote['name'], volumes, []), res)
self.assertEqual(remote, self.driver._active_config)
mock_failover_vol.assert_has_calls(
[mock.call(mock.ANY, v, remote, False,
@ -1593,9 +1594,9 @@ class RBDTestCase(test.TestCase):

remote = self.driver._get_target_config('default')
volumes = [self.volume_a, self.volume_b]
- res = self.driver.failover_host(self.context, volumes, 'default')
+ res = self.driver.failover_host(self.context, volumes, 'default', [])

- self.assertEqual(('default', volumes), res)
+ self.assertEqual(('default', volumes, []), res)
self.assertEqual(remote, self.driver._active_config)
mock_failover_vol.assert_has_calls(
[mock.call(mock.ANY, v, remote, False,
@ -1613,7 +1614,7 @@ class RBDTestCase(test.TestCase):
volumes = [self.volume_a, self.volume_b]
self.assertRaises(exception.InvalidReplicationTarget,
self.driver.failover_host,
- self.context, volumes, None)
+ self.context, volumes, None, [])

def test_failover_volume_non_replicated(self):
self.volume_a.replication_status = fields.ReplicationStatus.DISABLED
@ -141,11 +141,13 @@ class DriverTestCase(test.TestCase):
with mock.patch.object(my_driver, 'failover_host') as failover_mock:
res = my_driver.failover(mock.sentinel.context,
mock.sentinel.volumes,
- secondary_id=mock.sentinel.secondary_id)
+ secondary_id=mock.sentinel.secondary_id,
+ groups=[])
self.assertEqual(failover_mock.return_value, res)
failover_mock.assert_called_once_with(mock.sentinel.context,
mock.sentinel.volumes,
- mock.sentinel.secondary_id)
+ mock.sentinel.secondary_id,
+ [])


class BaseDriverTestCase(test.TestCase):
@ -26,6 +26,7 @@ from cinder.common import constants
from cinder import exception
from cinder import objects
from cinder.objects import fields
+ from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_service
from cinder.tests.unit import utils
from cinder.tests.unit import volume as base
@ -74,7 +75,8 @@ class ReplicationTestCase(base.BaseVolumeTestCase):
filters={'host': self.host})
mock_failover.assert_called_once_with(self.context,
[],
- secondary_id=new_backend)
+ secondary_id=new_backend,
+ groups=[])

db_svc = objects.Service.get_by_id(self.context, svc.id)
self.assertEqual(expected, db_svc.replication_status)
@ -269,14 +271,14 @@ class ReplicationTestCase(base.BaseVolumeTestCase):
called, not_called = not_called, called

called.return_value = ('secondary', [{'volume_id': vol.id,
- 'updates': {'status': 'error'}}])
+ 'updates': {'status': 'error'}}], [])

self.volume.failover(self.context,
secondary_backend_id='secondary')

not_called.assert_not_called()
called.assert_called_once_with(self.context, [vol],
- secondary_id='secondary')
+ secondary_id='secondary', groups=[])

expected_update = {'replication_status': rep_field.FAILED_OVER,
'active_backend_id': 'secondary',
@ -456,6 +458,8 @@ class ReplicationTestCase(base.BaseVolumeTestCase):
def _test_failover_model_updates(self, in_volumes, in_snapshots,
driver_volumes, driver_result,
out_volumes, out_snapshots,
+ in_groups=None, out_groups=None,
+ driver_group_result=None,
secondary_id=None):
host = vol_utils.extract_host(self.manager.host)
utils.create_service(self.context, {'host': host,
@ -466,9 +470,13 @@ class ReplicationTestCase(base.BaseVolumeTestCase):
for snapshot in in_snapshots:
utils.create_snapshot(self.context, **snapshot)

+ for group in in_groups:
+ utils.create_group(self.context, self.manager.host, **group)
+
with mock.patch.object(
self.manager.driver, 'failover_host',
- return_value=(secondary_id, driver_result)) as driver_mock:
+ return_value=(secondary_id, driver_result,
+ driver_group_result)) as driver_mock:
self.manager.failover_host(self.context, secondary_id)

self.assertSetEqual(driver_volumes,
@ -476,27 +484,56 @@ class ReplicationTestCase(base.BaseVolumeTestCase):

self._check_failover_db(objects.VolumeList, out_volumes)
self._check_failover_db(objects.SnapshotList, out_snapshots)
+ self._check_failover_db(objects.GroupList, out_groups)

- def test_failover_host_model_updates(self):
+ @mock.patch('cinder.volume.utils.is_group_a_type')
+ def test_failover_host_model_updates(self, mock_group_type):
status = fields.ReplicationStatus
- # IDs will be overwritten with UUIDs, but they help follow the code
- in_volumes = [
- {'id': 0, 'status': 'available',
- 'replication_status': status.DISABLED},
- {'id': 1, 'status': 'in-use',
- 'replication_status': status.NOT_CAPABLE},
- {'id': 2, 'status': 'available',
+ mock_group_type.return_value = True
+ in_groups = [
+ {'id': str(uuid.uuid4()), 'status': 'available',
+ 'group_type_id': fake.GROUP_TYPE_ID,
+ 'volume_type_ids': [fake.VOLUME_TYPE_ID],
'replication_status': status.FAILOVER_ERROR},
- {'id': 3, 'status': 'in-use',
- 'replication_status': status.ENABLED},
- {'id': 4, 'status': 'available',
- 'replication_status': status.FAILOVER_ERROR},
- {'id': 5, 'status': 'in-use',
+ {'id': str(uuid.uuid4()), 'status': 'available',
+ 'group_type_id': fake.GROUP_TYPE_ID,
+ 'volume_type_ids': [fake.VOLUME_TYPE_ID],
+ 'replication_status': status.ENABLED},
+ ]
+ driver_group_result = [
+ {'group_id': in_groups[0]['id'],
+ 'updates': {'replication_status': status.FAILOVER_ERROR}},
+ {'group_id': in_groups[1]['id'],
+ 'updates': {'replication_status': status.FAILED_OVER}},
+ ]
+ out_groups = [
+ {'id': in_groups[0]['id'], 'status': 'error',
+ 'replication_status': status.FAILOVER_ERROR},
+ {'id': in_groups[1]['id'], 'status': in_groups[1]['status'],
+ 'replication_status': status.FAILED_OVER},
+ ]
+
+ # test volumes
+ in_volumes = [
+ {'id': str(uuid.uuid4()), 'status': 'available',
+ 'replication_status': status.DISABLED},
+ {'id': str(uuid.uuid4()), 'status': 'in-use',
+ 'replication_status': status.NOT_CAPABLE},
+ {'id': str(uuid.uuid4()), 'status': 'available',
+ 'replication_status': status.FAILOVER_ERROR},
+ {'id': str(uuid.uuid4()), 'status': 'in-use',
+ 'replication_status': status.ENABLED},
+ {'id': str(uuid.uuid4()), 'status': 'available',
+ 'replication_status': status.FAILOVER_ERROR},
+ {'id': str(uuid.uuid4()), 'status': 'in-use',
+ 'replication_status': status.ENABLED},
+ {'id': str(uuid.uuid4()), 'status': 'available',
+ 'group_id': in_groups[0]['id'],
+ 'replication_status': status.FAILOVER_ERROR},
+ {'id': str(uuid.uuid4()), 'status': 'available',
+ 'group_id': in_groups[1]['id'],
'replication_status': status.ENABLED},
]
- # Generate real volume IDs
- for volume in in_volumes:
- volume['id'] = str(uuid.uuid4())
in_snapshots = [
{'id': v['id'], 'volume_id': v['id'], 'status': 'available'}
for v in in_volumes
@ -512,6 +549,10 @@ class ReplicationTestCase(base.BaseVolumeTestCase):
'updates': {'replication_status': status.FAILOVER_ERROR}},
{'volume_id': in_volumes[5]['id'],
'updates': {'replication_status': status.FAILED_OVER}},
+ {'volume_id': in_volumes[6]['id'],
+ 'updates': {'replication_status': status.FAILOVER_ERROR}},
+ {'volume_id': in_volumes[7]['id'],
+ 'updates': {'replication_status': status.FAILED_OVER}},
]
out_volumes = [
{'id': in_volumes[0]['id'], 'status': 'error',
@ -530,15 +571,23 @@ class ReplicationTestCase(base.BaseVolumeTestCase):
'replication_status': status.FAILOVER_ERROR},
{'id': in_volumes[5]['id'], 'status': in_volumes[5]['status'],
'replication_status': status.FAILED_OVER},
+ {'id': in_volumes[6]['id'], 'status': 'error',
+ 'previous_status': in_volumes[6]['status'],
+ 'replication_status': status.FAILOVER_ERROR},
+ {'id': in_volumes[7]['id'], 'status': in_volumes[7]['status'],
+ 'replication_status': status.FAILED_OVER},
]
out_snapshots = [
{'id': ov['id'],
'status': 'error' if ov['status'] == 'error' else 'available'}
for ov in out_volumes
]

self._test_failover_model_updates(in_volumes, in_snapshots,
driver_volumes, driver_result,
- out_volumes, out_snapshots)
+ out_volumes, out_snapshots,
+ in_groups, out_groups,
+ driver_group_result)

def test_failback_host_model_updates(self):
status = fields.ReplicationStatus
@ -612,4 +661,5 @@ class ReplicationTestCase(base.BaseVolumeTestCase):
self._test_failover_model_updates(in_volumes, in_snapshots,
driver_volumes, driver_result,
out_volumes, out_snapshots,
+ [], [], [],
self.manager.FAILBACK_SENTINEL)
@ -1458,7 +1458,7 @@ class BaseVD(object):
"""
return True

- def failover_host(self, context, volumes, secondary_id=None):
+ def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Failover a backend to a secondary replication target.

Instructs a replication capable/configured backend to failover
@ -1481,8 +1481,9 @@ class BaseVD(object):
:param volumes: list of volume objects, in case the driver needs
to take action on them in some way
:param secondary_id: Specifies rep target backend to fail over to
- :returns: ID of the backend that was failed-over to
- and model update for volumes
+ :param groups: replication groups
+ :returns: ID of the backend that was failed-over to,
+ model update for volumes, and model update for groups
"""

# Example volume_updates data structure:
@ -1490,15 +1491,18 @@ class BaseVD(object):
# 'updates': {'provider_id': 8,
# 'replication_status': 'failed-over',
# 'replication_extended_status': 'whatever',...}},]
+ # Example group_updates data structure:
+ # [{'group_id': <cinder-uuid>,
+ # 'updates': {'replication_status': 'failed-over',...}},]
raise NotImplementedError()

- def failover(self, context, volumes, secondary_id=None):
+ def failover(self, context, volumes, secondary_id=None, groups=None):
"""Like failover but for a host that is clustered.

Most of the time this will be the exact same behavior as failover_host,
so if it's not overwritten, it is assumed to be the case.
"""
- return self.failover_host(context, volumes, secondary_id)
+ return self.failover_host(context, volumes, secondary_id, groups)

def failover_completed(self, context, active_backend_id=None):
"""This method is called after failover for clustered backends."""
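Note: the base-driver docstring above spells out the new contract: failover_host (and the clustered failover variant) returns the failed-over backend ID, a list of volume model updates, and a list of group model updates. Below is a hedged sketch of a driver honouring that contract by failing over whole replication groups before loose volumes; the class name and the _failover_group/_failover_volume helpers are illustrative assumptions, not part of this change:

    # Sketch only: one way a replication-capable driver could use the new
    # groups argument instead of failing volumes over one by one.
    from cinder.objects import fields


    class ExampleReplicatedDriver(object):

        def failover_host(self, context, volumes, secondary_id=None,
                          groups=None):
            volume_updates = []
            group_updates = []
            failed_over_group_ids = set()

            # Fail over whole replication groups first.
            for group in groups or []:
                self._failover_group(group, secondary_id)    # assumed helper
                failed_over_group_ids.add(group.id)
                group_updates.append(
                    {'group_id': group.id,
                     'updates': {'replication_status':
                                 fields.ReplicationStatus.FAILED_OVER}})

            # Then handle volumes not covered by a failed-over group.
            for volume in volumes:
                if volume.group_id in failed_over_group_ids:
                    continue
                self._failover_volume(volume, secondary_id)  # assumed helper
                volume_updates.append(
                    {'volume_id': volume.id,
                     'updates': {'replication_status':
                                 fields.ReplicationStatus.FAILED_OVER}})

            return secondary_id, volume_updates, group_updates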
@ -1784,7 +1784,7 @@ class SCCommonDriver(driver.ManageableVD,
# Error and leave.
return model_update

- def failover_host(self, context, volumes, secondary_id=None):
+ def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Failover to secondary.

:param context: security context
@ -1808,7 +1808,7 @@ class SCCommonDriver(driver.ManageableVD,
if self.failed_over:
if secondary_id == 'default':
LOG.debug('failing back')
- return 'default', self.failback_volumes(volumes)
+ return 'default', self.failback_volumes(volumes), []
raise exception.InvalidReplicationTarget(
reason=_('Already failed over'))

@ -1851,7 +1851,7 @@ class SCCommonDriver(driver.ManageableVD,
LOG.debug(self.failed_over)
LOG.debug(self.active_backend_id)
LOG.debug(self.replication_enabled)
- return destssn, volume_updates
+ return destssn, volume_updates, []
else:
raise exception.InvalidReplicationTarget(reason=(
_('replication_failover failed. %s not found.') %
@ -1210,7 +1210,8 @@ class CommonAdapter(object):
raise exception.InvalidInput(
reason='Invalid backend_id specified.')

- def failover_host(self, context, volumes, secondary_backend_id):
+ def failover_host(self, context, volumes, secondary_backend_id,
+ groups=None):
"""Fails over the volume back and forth.

Driver needs to update following info for failed-over volume:
@ -1269,7 +1270,7 @@ class CommonAdapter(object):
# any sequential request will be redirected to it.
self.client = mirror_view.secondary_client

- return secondary_backend_id, volume_update_list
+ return secondary_backend_id, volume_update_list, []

def get_pool_name(self, volume):
return self.client.get_pool_name(volume.name)
@ -290,9 +290,10 @@ class VNXDriver(driver.ManageableVD,
def backup_use_temp_snapshot(self):
return True

- def failover_host(self, context, volumes, secondary_id=None):
+ def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Fail-overs volumes from primary device to secondary."""
- return self.adapter.failover_host(context, volumes, secondary_id)
+ return self.adapter.failover_host(context, volumes, secondary_id,
+ groups)

@utils.require_consistent_group_snapshot_enabled
def create_group(self, context, group):
@ -714,7 +714,7 @@ class HPE3PARFCDriver(driver.ManageableVD,
self._logout(common)

@utils.trace
- def failover_host(self, context, volumes, secondary_id=None):
+ def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Force failover to a secondary replication target."""
common = self._login(timeout=30)
try:
@ -722,6 +722,6 @@ class HPE3PARFCDriver(driver.ManageableVD,
active_backend_id, volume_updates = common.failover_host(
context, volumes, secondary_id)
self._active_backend_id = active_backend_id
- return active_backend_id, volume_updates
+ return active_backend_id, volume_updates, []
finally:
self._logout(common)
@ -984,7 +984,7 @@ class HPE3PARISCSIDriver(driver.ManageableVD,
self._logout(common)

@utils.trace
- def failover_host(self, context, volumes, secondary_id=None):
+ def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Force failover to a secondary replication target."""
common = self._login(timeout=30)
try:
@ -992,6 +992,6 @@ class HPE3PARISCSIDriver(driver.ManageableVD,
active_backend_id, volume_updates = common.failover_host(
context, volumes, secondary_id)
self._active_backend_id = active_backend_id
- return active_backend_id, volume_updates
+ return active_backend_id, volume_updates, []
finally:
self._logout(common)
@ -1492,7 +1492,7 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver):

# v2 replication methods
@cinder_utils.trace
- def failover_host(self, context, volumes, secondary_id=None):
+ def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Force failover to a secondary replication target."""
if secondary_id and secondary_id == self.FAILBACK_VALUE:
volume_update_list = self._replication_failback(volumes)
@ -1575,7 +1575,7 @@ class HPELeftHandISCSIDriver(driver.ISCSIDriver):

self._active_backend_id = target_id

- return target_id, volume_update_list
+ return target_id, volume_update_list, []

def _do_replication_setup(self):
default_san_ssh_port = self.configuration.hpelefthand_ssh_port
@ -1838,7 +1838,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):
self.configuration)
return secondary_id, volumes_update

- def failover_host(self, context, volumes, secondary_id=None):
+ def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Failover all volumes to secondary."""
if secondary_id == 'default':
secondary_id, volumes_update = self._failback(volumes)
@ -1850,7 +1850,7 @@ class HuaweiBaseDriver(driver.VolumeDriver):
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)

- return secondary_id, volumes_update
+ return secondary_id, volumes_update, []

def initialize_connection_snapshot(self, snapshot, connector, **kwargs):
"""Map a snapshot to a host and return target iSCSI information."""
@ -1059,7 +1059,7 @@ class DS8KProxy(proxy.IBMStorageProxy):

@proxy.logger
@proxy._trace_time
- def failover_host(self, ctxt, volumes, secondary_id):
+ def failover_host(self, ctxt, volumes, secondary_id, groups=None):
"""Fail over the volume back and forth.

if secondary_id is 'default', volumes will be failed back,
@ -1070,12 +1070,12 @@ class DS8KProxy(proxy.IBMStorageProxy):
if not self._active_backend_id:
LOG.info("Host has been failed back. doesn't need "
"to fail back again.")
- return self._active_backend_id, volume_update_list
+ return self._active_backend_id, volume_update_list, []
else:
if self._active_backend_id:
LOG.info("Host has been failed over to %s.",
self._active_backend_id)
- return self._active_backend_id, volume_update_list
+ return self._active_backend_id, volume_update_list, []

backend_id = self._replication._target_helper.backend['id']
if secondary_id is None:
@ -1134,4 +1134,4 @@ class DS8KProxy(proxy.IBMStorageProxy):
self._switch_backend_connection(self._active_backend_id)
self._active_backend_id = ""

- return secondary_id, volume_update_list
+ return secondary_id, volume_update_list, []
@ -217,11 +217,11 @@ class IBMStorageDriver(san.SanDriver,

return self.proxy.thaw_backend(context)

- def failover_host(self, context, volumes, secondary_id=None):
+ def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Failover a backend to a secondary replication target. """

return self.proxy.failover_host(
- context, volumes, secondary_id)
+ context, volumes, secondary_id, groups)

def get_replication_status(self, context, volume):
"""Return replication status."""
@ -1280,7 +1280,7 @@ class XIVProxy(proxy.IBMStorageProxy):
return False, msg

@proxy._trace_time
- def failover_host(self, context, volumes, secondary_id):
+ def failover_host(self, context, volumes, secondary_id, groups=None):
"""Failover a full backend.

Fails over the volume back and forth, if secondary_id is 'default',
@ -1300,7 +1300,7 @@ class XIVProxy(proxy.IBMStorageProxy):
if self._using_default_backend():
LOG.info("Host has been failed back. No need "
"to fail back again.")
- return self.active_backend_id, volume_update_list
+ return self.active_backend_id, volume_update_list, []
pool_slave = self.storage_info[storage.FLAG_KEYS['storage_pool']]
pool_master = self._get_target_params(
self.active_backend_id)['san_clustername']
@ -1308,7 +1308,7 @@ class XIVProxy(proxy.IBMStorageProxy):
else:
if not self._using_default_backend():
LOG.info("Already failed over. No need to failover again.")
- return self.active_backend_id, volume_update_list
+ return self.active_backend_id, volume_update_list, []
# case: need to select a target
if secondary_id is None:
secondary_id = self._get_target()
@ -1393,7 +1393,7 @@ class XIVProxy(proxy.IBMStorageProxy):
# set active backend id to secondary id
self.active_backend_id = secondary_id

- return secondary_id, volume_update_list
+ return secondary_id, volume_update_list, []

@proxy._trace_time
def retype(self, ctxt, volume, new_type, diff, host):
@ -2835,7 +2835,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
LOG.debug("Exit: update volume copy status.")

# #### V2.1 replication methods #### #
- def failover_host(self, context, volumes, secondary_id=None):
+ def failover_host(self, context, volumes, secondary_id=None, groups=None):
LOG.debug('enter: failover_host: secondary_id=%(id)s',
{'id': secondary_id})
if not self._replica_enabled:
@ -2859,7 +2859,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,

LOG.debug('leave: failover_host: secondary_id=%(id)s',
{'id': secondary_id})
- return secondary_id, volumes_update
+ return secondary_id, volumes_update, []

def _replication_failback(self, ctxt, volumes):
"""Fail back all the volume on the secondary backend."""
@ -357,7 +357,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
"changed to failed_over ", rsession_name)

@kaminario_logger
- def failover_host(self, context, volumes, secondary_id=None):
+ def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Failover to replication target."""
volume_updates = []
back_end_ip = None
@ -508,7 +508,7 @@ class KaminarioCinderDriver(cinder.volume.driver.ISCSIDriver):
volume_updates.append({'volume_id': v['id'],
'updates': {'status': 'error', }})
back_end_ip = self.replica.backend_id
- return back_end_ip, volume_updates
+ return back_end_ip, volume_updates, []

@kaminario_logger
def _create_volume_replica_user_snap(self, k2, sess):
@ -453,7 +453,7 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary,
self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
super(NetAppBlockStorageCmodeLibrary, self).unmanage(volume)

- def failover_host(self, context, volumes, secondary_id=None):
+ def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Failover a backend to a secondary replication target."""

return self._failover_host(volumes, secondary_id=secondary_id)
@ -130,5 +130,5 @@ class NetApp7modeFibreChannelDriver(driver.BaseVD,
group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots,
source_cg=source_cg, source_vols=source_vols)

- def failover_host(self, context, volumes, secondary_id=None):
+ def failover_host(self, context, volumes, secondary_id=None, groups=None):
raise NotImplementedError()
@ -130,6 +130,6 @@ class NetAppCmodeFibreChannelDriver(driver.BaseVD,
group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots,
source_cg=source_cg, source_vols=source_vols)

- def failover_host(self, context, volumes, secondary_id=None):
+ def failover_host(self, context, volumes, secondary_id=None, groups=None):
return self.library.failover_host(
context, volumes, secondary_id=secondary_id)
@ -127,5 +127,5 @@ class NetApp7modeISCSIDriver(driver.BaseVD,
group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots,
source_cg=source_cg, source_vols=source_vols)

- def failover_host(self, context, volumes, secondary_id=None):
+ def failover_host(self, context, volumes, secondary_id=None, groups=None):
raise NotImplementedError()
@ -127,6 +127,6 @@ class NetAppCmodeISCSIDriver(driver.BaseVD,
group, volumes, cgsnapshot=cgsnapshot, snapshots=snapshots,
source_cg=source_cg, source_vols=source_vols)

- def failover_host(self, context, volumes, secondary_id=None):
+ def failover_host(self, context, volumes, secondary_id=None, groups=None):
return self.library.failover_host(
context, volumes, secondary_id=secondary_id)
@ -709,7 +709,7 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,

super(NetAppCmodeNfsDriver, self).unmanage(volume)

- def failover_host(self, context, volumes, secondary_id=None):
+ def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Failover a backend to a secondary replication target."""

return self._failover_host(volumes, secondary_id=secondary_id)
@ -603,7 +603,7 @@ class DataMotionMixin(object):

return active_backend_name, volume_updates

- def _failover_host(self, volumes, secondary_id=None):
+ def _failover_host(self, volumes, secondary_id=None, groups=None):

if secondary_id == self.backend_name:
msg = _("Cannot failover to the same host as the primary.")
@ -641,4 +641,4 @@ class DataMotionMixin(object):
self.failed_over = True
self.failed_over_backend_name = active_backend_name

- return active_backend_name, volume_updates
+ return active_backend_name, volume_updates, []
@ -1364,7 +1364,7 @@ class PureBaseVolumeDriver(san.SanDriver):
"message: %s", err.text)

@pure_driver_debug_trace
- def failover_host(self, context, volumes, secondary_id=None):
+ def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Failover backend to a secondary array

This action will not affect the original volumes in any
@ -1377,7 +1377,7 @@ class PureBaseVolumeDriver(san.SanDriver):
# our current array back to the primary.
if self._failed_over_primary_array:
self._set_current_array(self._failed_over_primary_array)
- return secondary_id, []
+ return secondary_id, [], []
else:
msg = _('Unable to failback to "default", this can only be '
'done after a failover has completed.')
@ -1455,7 +1455,7 @@ class PureBaseVolumeDriver(san.SanDriver):
# secondary array we just failed over to.
self._failed_over_primary_array = self._get_current_array()
self._set_current_array(secondary_array)
- return secondary_array._backend_id, model_updates
+ return secondary_array._backend_id, model_updates, []

def _does_pgroup_exist(self, array, pgroup_name):
"""Return True/False"""
@ -997,7 +997,7 @@ class RBDDriver(driver.CloneableImageVD,
secondary_id = candidates.pop()
return secondary_id, self._get_target_config(secondary_id)

- def failover_host(self, context, volumes, secondary_id=None):
+ def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Failover to replication target."""
LOG.info('RBD driver failover started.')
if not self._is_replication_enabled:
@ -1020,7 +1020,7 @@ class RBDDriver(driver.CloneableImageVD,
self._active_backend_id = secondary_id
self._active_config = remote
LOG.info('RBD driver failover completed.')
- return secondary_id, updates
+ return secondary_id, updates, []

def ensure_export(self, context, volume):
"""Synchronously recreates an export for a logical volume."""
@ -2036,7 +2036,7 @@ class SolidFireDriver(san.SanISCSIDriver):
self._issue_api_request('ModifyVolume', params,
endpoint=remote['endpoint'])

- def failover_host(self, context, volumes, secondary_id=None):
+ def failover_host(self, context, volumes, secondary_id=None, groups=None):
"""Failover to replication target."""
volume_updates = []
remote = None
@ -2100,7 +2100,7 @@ class SolidFireDriver(san.SanISCSIDriver):
# but for now that's going to be the trade off of using replciation
self.active_cluster_info = remote
self.failed_over = True
- return remote['mvip'], volume_updates
+ return remote['mvip'], volume_updates, []

def freeze_backend(self, context):
"""Freeze backend notification."""
@ -2383,9 +2383,9 @@ class VolumeManager(manager.CleanableManager,
# and update db
if volume_stats.get('replication_status') == (
fields.ReplicationStatus.ERROR):
- backend = vol_utils.extract_host(self.host, 'backend')
- groups = vol_utils.get_replication_groups_by_host(
- context, backend)
+ filters = self._get_cluster_or_host_filters()
+ groups = objects.GroupList.get_all_replicated(
+ context, filters=filters)
group_model_updates, volume_model_updates = (
self.driver.get_replication_error_status(context,
groups))
@ -2811,11 +2811,15 @@ class VolumeManager(manager.CleanableManager,

return vol_ref

- def _get_my_resources(self, ctxt, ovo_class_list):
+ def _get_cluster_or_host_filters(self):
if self.cluster:
filters = {'cluster_name': self.cluster}
else:
filters = {'host': self.host}
+ return filters
+
+ def _get_my_resources(self, ctxt, ovo_class_list):
+ filters = self._get_cluster_or_host_filters()
return getattr(ovo_class_list, 'get_all')(ctxt, filters=filters)

def _get_my_volumes(self, ctxt):
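Note: both the status-reporting path above and the failover path below now look up replicated groups the same way: build host or cluster filters, then ask GroupList.get_all_replicated (added by this change) for the groups whose group type enables replication. A small hedged sketch of that lookup; the function name and its arguments are illustrative:

    from cinder import objects


    def replicated_groups_for(context, host=None, cluster_name=None):
        """Sketch: replicated-group lookup as the manager performs it."""
        filters = ({'cluster_name': cluster_name} if cluster_name
                   else {'host': host})
        # Only groups whose group type enables (consistent) group
        # replication are returned.
        return objects.GroupList.get_all_replicated(context, filters=filters)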
@ -3961,6 +3965,7 @@ class VolumeManager(manager.CleanableManager,
snapshot.save()

volume_update_list = None
+ group_update_list = None
try:
# For non clustered we can call v2.1 failover_host, but for
# clustered we call a/a failover method. We know a/a method
@ -3971,17 +3976,30 @@ class VolumeManager(manager.CleanableManager,
# expected form of volume_update_list:
# [{volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}},
# {volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}}]
- active_backend_id, volume_update_list = failover(
- context,
+ # It includes volumes in replication groups and those not in them
+ # expected form of group_update_list:
+ # [{group_id: <cinder-grpid>, updates: {'xxxx': xxxx....}},
+ # {group_id: <cinder-grpid>, updates: {'xxxx': xxxx....}}]
+ filters = self._get_cluster_or_host_filters()
+ groups = objects.GroupList.get_all_replicated(context,
+ filters=filters)
+ active_backend_id, volume_update_list, group_update_list = (
+ failover(context,
replicated_vols,
- secondary_id=secondary_backend_id)
+ secondary_id=secondary_backend_id,
+ groups=groups))
try:
update_data = {u['volume_id']: u['updates']
for u in volume_update_list}
except KeyError:
msg = "Update list, doesn't include volume_id"
raise exception.ProgrammingError(reason=msg)
+ try:
+ update_group_data = {g['group_id']: g['updates']
+ for g in group_update_list}
+ except KeyError:
+ msg = "Update list, doesn't include group_id"
+ raise exception.ProgrammingError(reason=msg)
except Exception as exc:
# NOTE(jdg): Drivers need to be aware if they fail during
# a failover sequence, we're expecting them to cleanup
@ -4046,6 +4064,19 @@ class VolumeManager(manager.CleanableManager,
volume.update(update)
volume.save()

+ for grp in groups:
+ update = update_group_data.get(grp.id, {})
+ if update.get('status', '') == 'error':
+ update['replication_status'] = repl_status.FAILOVER_ERROR
+ elif update.get('replication_status') in (None,
+ repl_status.FAILED_OVER):
+ update['replication_status'] = updates['replication_status']
+
+ if update['replication_status'] == repl_status.FAILOVER_ERROR:
+ update.setdefault('status', 'error')
+ grp.update(update)
+ grp.save()
+
LOG.info("Failed over to replication target successfully.")

# TODO(geguileo): In P - remove this
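Note: a worked example of the group_update_list the manager indexes above; the UUIDs are made up, and the comments restate how the loop above treats each entry:

    # Illustrative data only (made-up UUIDs): the shape the driver hands
    # back and how the manager indexes it by group id.
    group_update_list = [
        {'group_id': '11111111-aaaa-bbbb-cccc-222222222222',
         'updates': {'replication_status': 'failed-over'}},
        {'group_id': '33333333-dddd-eeee-ffff-444444444444',
         'updates': {'status': 'error'}},
    ]
    update_group_data = {g['group_id']: g['updates']
                         for g in group_update_list}

    # A group absent from the list gets {}, so the manager falls back to
    # the overall replication_status it is applying; a 'status': 'error'
    # entry is promoted to replication_status FAILOVER_ERROR and the
    # group status is set to 'error'.
    assert update_group_data['33333333-dddd-eeee-ffff-444444444444'] == {
        'status': 'error'}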
@ -935,29 +935,3 @@ def is_group_a_type(group, key):
)
return spec == "<is> True"
return False
-
-
- def is_group_a_non_consistent_replication_group_type(group):
- return is_group_a_type(group, "group_replication_enabled")
-
-
- def is_group_a_consistent_replication_group_type(group):
- return is_group_a_type(group, "consistent_group_replication_enabled")
-
-
- def is_group_a_replication_group_type(group):
- if (is_group_a_non_consistent_replication_group_type(group) or
- is_group_a_consistent_replication_group_type(group)):
- return True
- return False
-
-
- def get_replication_groups_by_host(ctxt, host):
- groups = []
- filters = {'host': host, 'backend_match_level': 'backend'}
- grps = objects.GroupList.get_all(ctxt, filters=filters)
- for grp in grps:
- if is_group_a_replication_group_type(grp):
- groups.append(grp)
-
- return groups
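Note: with these wrappers and get_replication_groups_by_host removed, the call sites touched by this change check group types through is_group_a_type directly and query replicated groups via GroupList.get_all_replicated. A hedged sketch of the equivalent of the removed is_group_a_replication_group_type helper (the standalone function name here is illustrative):

    from cinder.volume import utils as vol_utils


    def group_is_replicated(group):
        """Sketch: the check the removed wrappers used to spell out."""
        return (vol_utils.is_group_a_type(
                    group, "group_replication_enabled") or
                vol_utils.is_group_a_type(
                    group, "consistent_group_replication_enabled"))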