Huawei: Implement v2.1 replication
This patch implements v2.1 replication in the Huawei driver. All v2 replication functions are removed. A new function, failover_host, is added to support failover between backends. When an error occurs on the primary backend, you can fail over to the secondary backend, but it is strongly recommended to recover the primary backend afterwards. After a failover, operations on the secondary backend are limited. For example, a volume created on the secondary cannot be used after failback, so you should avoid such operations (create, delete, extend volumes, etc.). You can, however, detach a volume that is in the in-use status and re-attach it to a VM to make the replication volumes readable and writable.

The configuration for v2.1 is almost the same as for v2, except that only one backend configuration is needed. A simple example:

[replica]
volume_driver = cinder.volume.drivers.huawei.huawei_driver.HuaweiISCSIDriver
cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf.xml
replication_device = backend_id:huawei-replica-1,
                     storage_pool:pool_1,
                     san_address:san_url_1;san_url_2,
                     iscsi_default_target_ip:192.168.1.100,
                     san_user:admin,
                     san_password:passwd

Change-Id: Ib555707e9751f78ce00c9dcdbbe370a18ed0ae6c
Closes-Bug: #1556450
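For illustration only (not part of the patch): a minimal sketch of the failover contract this commit introduces, using the backend_id from the sample configuration above. The CLI lines are an assumption about the standard Cinder client; the return shape follows failover_host in this patch.

# Hypothetical operator flow (assumed cinder CLI syntax):
#   cinder failover-host ubuntu001@replica --backend_id huawei-replica-1
#   cinder failover-host ubuntu001@replica --backend_id default   # fail back
#
# Inside the driver, failover_host returns the now-active backend id plus
# per-volume model updates:
secondary_id, volumes_update = driver.failover_host(
    context, volumes, secondary_id='huawei-replica-1')
# volumes_update is a list such as:
#   [{'volume_id': '<uuid>',
#     'updates': {'replication_status': 'failed-over',
#                 'provider_location': '<remote LUN id>',
#                 'replication_driver_data': '<json pair info>'}}]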
parent 7d6da7cda9
commit eb3fcbb9bc
@ -13,6 +13,7 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""Tests for huawei drivers."""
|
||||
import copy
|
||||
import ddt
|
||||
import json
|
||||
import mock
|
||||
@ -63,6 +64,7 @@ test_volume = {'name': 'volume-21ec7341-9256-497b-97d9-ef48edcf0635',
|
||||
'volume_type_id': None,
|
||||
'host': 'ubuntu001@backend001#OpenStack_Pool',
|
||||
'provider_location': '11',
|
||||
'status': 'available',
|
||||
}
|
||||
|
||||
fake_smartx_value = {'smarttier': 'true',
|
||||
@ -1743,7 +1745,10 @@ def Fake_sleep(time):
|
||||
pass
|
||||
|
||||
|
||||
class FakeHuaweiConf(object):
|
||||
REPLICA_BACKEND_ID = 'huawei-replica-1'
|
||||
|
||||
|
||||
class FakeHuaweiConf(huawei_conf.HuaweiConf):
|
||||
def __init__(self, conf, protocol):
|
||||
self.conf = conf
|
||||
self.protocol = protocol
|
||||
@ -1789,8 +1794,8 @@ class FakeHuaweiConf(object):
|
||||
'TargetPortGroup': 'portgroup-test', }
|
||||
setattr(self.conf, 'iscsi_info', [iscsi_info])
|
||||
|
||||
targets = [{'target_device_id': 'huawei-replica-1',
|
||||
'managed_backend_name': 'ubuntu@huawei2#OpenStack_Pool',
|
||||
targets = [{'backend_id': REPLICA_BACKEND_ID,
|
||||
'storage_pool': 'OpenStack_Pool',
|
||||
'san_address':
|
||||
'https://192.0.2.69:8088/deviceManager/rest/',
|
||||
'san_user': 'admin',
|
||||
@ -1870,16 +1875,23 @@ class FakeISCSIStorage(huawei_driver.HuaweiISCSIDriver):
|
||||
def __init__(self, configuration):
|
||||
self.configuration = configuration
|
||||
self.huawei_conf = FakeHuaweiConf(self.configuration, 'iSCSI')
|
||||
self.active_backend_id = None
|
||||
self.replica = None
|
||||
|
||||
def do_setup(self):
|
||||
self.metro_flag = True
|
||||
self.huawei_conf.update_config_value()
|
||||
self.get_local_and_remote_dev_conf()
|
||||
|
||||
self.client = FakeClient(configuration=self.configuration)
|
||||
self.rmt_client = FakeClient(configuration=self.configuration)
|
||||
self.replica_client = FakeClient(configuration=self.configuration)
|
||||
self.metro = hypermetro.HuaweiHyperMetro(self.client,
|
||||
self.rmt_client,
|
||||
self.configuration)
|
||||
self.replica = FakeReplicaPairManager(self.client, self.configuration)
|
||||
self.replica = FakeReplicaPairManager(self.client,
|
||||
self.replica_client,
|
||||
self.configuration)
|
||||
|
||||
|
||||
class FakeFCStorage(huawei_driver.HuaweiFCDriver):
|
||||
@ -1889,16 +1901,23 @@ class FakeFCStorage(huawei_driver.HuaweiFCDriver):
|
||||
self.configuration = configuration
|
||||
self.fcsan = None
|
||||
self.huawei_conf = FakeHuaweiConf(self.configuration, 'iSCSI')
|
||||
self.active_backend_id = None
|
||||
self.replica = None
|
||||
|
||||
def do_setup(self):
|
||||
self.metro_flag = True
|
||||
self.huawei_conf.update_config_value()
|
||||
self.get_local_and_remote_dev_conf()
|
||||
|
||||
self.client = FakeClient(configuration=self.configuration)
|
||||
self.rmt_client = FakeClient(configuration=self.configuration)
|
||||
self.replica_client = FakeClient(configuration=self.configuration)
|
||||
self.metro = hypermetro.HuaweiHyperMetro(self.client,
|
||||
self.rmt_client,
|
||||
self.configuration)
|
||||
self.replica = FakeReplicaPairManager(self.client, self.configuration)
|
||||
self.replica = FakeReplicaPairManager(self.client,
|
||||
self.replica_client,
|
||||
self.configuration)
|
||||
|
||||
|
||||
@ddt.ddt
|
||||
@ -1992,7 +2011,7 @@ class HuaweiISCSIDriverTestCase(test.TestCase):
|
||||
'rmt_lun_id': '1'}
|
||||
driver_data = replication.to_string(driver_data)
|
||||
self.assertEqual(driver_data, model_update['replication_driver_data'])
|
||||
self.assertEqual('enabled', model_update['replication_status'])
|
||||
self.assertEqual('available', model_update['replication_status'])
|
||||
|
||||
def test_initialize_connection_success(self):
|
||||
iscsi_properties = self.driver.initialize_connection(test_volume,
|
||||
@ -2708,14 +2727,6 @@ class HuaweiISCSIDriverTestCase(test.TestCase):
|
||||
self.driver.unmanage_snapshot(test_snapshot)
|
||||
self.assertEqual(1, mock_rename.call_count)
|
||||
|
||||
def test_init_rmt_client(self):
|
||||
self.mock_object(rest_client, 'RestClient',
|
||||
mock.Mock(return_value=None))
|
||||
replica = replication.ReplicaPairManager(self.driver.client,
|
||||
self.configuration)
|
||||
self.assertEqual(replica.rmt_pool, 'OpenStack_Pool')
|
||||
self.assertEqual(replica.target_dev_id, 'huawei-replica-1')
|
||||
|
||||
@ddt.data(sync_replica_specs, async_replica_specs)
|
||||
def test_create_replication_success(self, mock_type):
|
||||
self.mock_object(replication.ReplicaCommonDriver, 'sync')
|
||||
@ -2729,7 +2740,7 @@ class HuaweiISCSIDriverTestCase(test.TestCase):
|
||||
'rmt_lun_id': '1'}
|
||||
driver_data = replication.to_string(driver_data)
|
||||
self.assertEqual(driver_data, model_update['replication_driver_data'])
|
||||
self.assertEqual('enabled', model_update['replication_status'])
|
||||
self.assertEqual('available', model_update['replication_status'])
|
||||
|
||||
@ddt.data(
|
||||
[
|
||||
@ -2795,6 +2806,7 @@ class HuaweiISCSIDriverTestCase(test.TestCase):
|
||||
|
||||
def test_wait_volume_online(self):
|
||||
replica = FakeReplicaPairManager(self.driver.client,
|
||||
self.driver.replica_client,
|
||||
self.configuration)
|
||||
lun_info = {'ID': '11'}
|
||||
|
||||
@ -2859,124 +2871,240 @@ class HuaweiISCSIDriverTestCase(test.TestCase):
|
||||
self.assertRaises(exception.VolumeBackendAPIException,
|
||||
common_driver.wait_replica_ready, pair_id)
|
||||
|
||||
def test_replication_enable_success(self):
|
||||
self.mock_object(replication.ReplicaCommonDriver, 'unprotect_second')
|
||||
self.mock_object(replication.ReplicaCommonDriver, 'split')
|
||||
self.mock_object(replication.PairOp, 'is_primary',
|
||||
mock.Mock(side_effect=[False, True]))
|
||||
self.driver.replication_enable(None, replication_volume)
|
||||
def test_failover_to_current(self):
|
||||
driver = FakeISCSIStorage(configuration=self.configuration)
|
||||
driver.do_setup()
|
||||
old_client = driver.client
|
||||
old_replica_client = driver.replica_client
|
||||
old_replica = driver.replica
|
||||
secondary_id, volumes_update = driver.failover_host(
|
||||
None, [test_volume], 'default')
|
||||
self.assertTrue(driver.active_backend_id in ('', None))
|
||||
self.assertTrue(old_client == driver.client)
|
||||
self.assertTrue(old_replica_client == driver.replica_client)
|
||||
self.assertTrue(old_replica == driver.replica)
|
||||
self.assertEqual('default', secondary_id)
|
||||
self.assertEqual(0, len(volumes_update))
|
||||
|
||||
@ddt.data(
|
||||
[
|
||||
replication.AbsReplicaOp,
|
||||
'is_running_status',
|
||||
mock.Mock(return_value=False)
|
||||
],
|
||||
[
|
||||
replication,
|
||||
'get_replication_driver_data',
|
||||
mock.Mock(return_value={})
|
||||
],
|
||||
[
|
||||
replication.PairOp,
|
||||
'get_replica_info',
|
||||
mock.Mock(return_value={})
|
||||
],
|
||||
)
|
||||
@ddt.unpack
|
||||
def test_replication_enable_fail(self, mock_module, mock_func, mock_value):
|
||||
self.mock_object(mock_module, mock_func, mock_value)
|
||||
self.mock_object(huawei_utils.time, 'time', mock.Mock(
|
||||
side_effect = utils.generate_timeout_series(
|
||||
constants.DEFAULT_REPLICA_WAIT_TIMEOUT)))
|
||||
def test_failover_normal_volumes(self):
|
||||
driver = FakeISCSIStorage(configuration=self.configuration)
|
||||
driver.do_setup()
|
||||
old_client = driver.client
|
||||
old_replica_client = driver.replica_client
|
||||
old_replica = driver.replica
|
||||
secondary_id, volumes_update = driver.failover_host(
|
||||
None, [test_volume], REPLICA_BACKEND_ID)
|
||||
self.assertEqual(REPLICA_BACKEND_ID, driver.active_backend_id)
|
||||
self.assertTrue(old_client == driver.replica_client)
|
||||
self.assertTrue(old_replica_client == driver.client)
|
||||
self.assertFalse(old_replica == driver.replica)
|
||||
self.assertEqual(REPLICA_BACKEND_ID, secondary_id)
|
||||
self.assertEqual(1, len(volumes_update))
|
||||
v_id = volumes_update[0]['volume_id']
|
||||
v_update = volumes_update[0]['updates']
|
||||
self.assertEqual(test_volume['id'], v_id)
|
||||
self.assertEqual('error', v_update['status'])
|
||||
self.assertEqual(test_volume['status'],
|
||||
v_update['metadata']['old_status'])
|
||||
|
||||
self.assertRaises(
|
||||
exception.VolumeBackendAPIException,
|
||||
self.driver.replication_enable, None, replication_volume)
|
||||
def test_failback_to_current(self):
|
||||
driver = FakeISCSIStorage(configuration=self.configuration)
|
||||
driver.active_backend_id = REPLICA_BACKEND_ID
|
||||
driver.do_setup()
|
||||
old_client = driver.client
|
||||
old_replica_client = driver.replica_client
|
||||
old_replica = driver.replica
|
||||
secondary_id, volumes_update = driver.failover_host(
|
||||
None, [test_volume], REPLICA_BACKEND_ID)
|
||||
self.assertEqual(REPLICA_BACKEND_ID, driver.active_backend_id)
|
||||
self.assertTrue(old_client == driver.client)
|
||||
self.assertTrue(old_replica_client == driver.replica_client)
|
||||
self.assertTrue(old_replica == driver.replica)
|
||||
self.assertEqual(REPLICA_BACKEND_ID, secondary_id)
|
||||
self.assertEqual(0, len(volumes_update))
|
||||
|
||||
def test_replication_disable_fail(self):
|
||||
self.mock_object(huawei_utils.time, 'time', mock.Mock(
|
||||
side_effect = utils.generate_timeout_series(
|
||||
constants.DEFAULT_REPLICA_WAIT_TIMEOUT)))
|
||||
def test_failback_normal_volumes(self):
|
||||
volume = copy.deepcopy(test_volume)
|
||||
volume['status'] = 'error'
|
||||
volume['metadata'] = {'old_status': 'available'}
|
||||
|
||||
self.assertRaises(
|
||||
exception.VolumeBackendAPIException,
|
||||
self.driver.replication_disable, None, replication_volume)
|
||||
driver = FakeISCSIStorage(configuration=self.configuration)
|
||||
driver.active_backend_id = REPLICA_BACKEND_ID
|
||||
driver.do_setup()
|
||||
old_client = driver.client
|
||||
old_replica_client = driver.replica_client
|
||||
old_replica = driver.replica
|
||||
secondary_id, volumes_update = driver.failover_host(
|
||||
None, [volume], 'default')
|
||||
self.assertTrue(driver.active_backend_id in ('', None))
|
||||
self.assertTrue(old_client == driver.replica_client)
|
||||
self.assertTrue(old_replica_client == driver.client)
|
||||
self.assertFalse(old_replica == driver.replica)
|
||||
self.assertEqual('default', secondary_id)
|
||||
self.assertEqual(1, len(volumes_update))
|
||||
v_id = volumes_update[0]['volume_id']
|
||||
v_update = volumes_update[0]['updates']
|
||||
self.assertEqual(volume['id'], v_id)
|
||||
self.assertEqual('available', v_update['status'])
|
||||
self.assertFalse('old_status' in v_update['metadata'])
|
||||
|
||||
def test_replication_disable_success(self):
|
||||
self.mock_object(replication.ReplicaCommonDriver, 'split')
|
||||
self.driver.replication_disable(None, replication_volume)
|
||||
|
||||
self.mock_object(replication, 'get_replication_driver_data',
|
||||
mock.Mock(return_value={}))
|
||||
self.driver.replication_disable(None, replication_volume)
|
||||
|
||||
def test_replication_failover_success(self):
|
||||
self.mock_object(replication.ReplicaCommonDriver, 'split')
|
||||
self.mock_object(replication.PairOp, 'is_primary',
|
||||
mock.Mock(return_value=False))
|
||||
model_update = self.driver.replication_failover(
|
||||
None, replication_volume, None)
|
||||
self.assertEqual('ubuntu@huawei2#OpenStack_Pool', model_update['host'])
|
||||
self.assertEqual('1', model_update['provider_location'])
|
||||
driver_data = {'pair_id': TEST_PAIR_ID,
|
||||
'rmt_lun_id': '11'}
|
||||
driver_data = replication.to_string(driver_data)
|
||||
self.assertEqual(driver_data, model_update['replication_driver_data'])
|
||||
|
||||
@ddt.data(
|
||||
[
|
||||
replication.PairOp,
|
||||
'is_primary',
|
||||
mock.Mock(return_value=True)
|
||||
],
|
||||
[
|
||||
replication.PairOp,
|
||||
'is_primary',
|
||||
mock.Mock(return_value=False)
|
||||
],
|
||||
[
|
||||
replication,
|
||||
'get_replication_driver_data',
|
||||
mock.Mock(return_value={})
|
||||
],
|
||||
[
|
||||
replication,
|
||||
'get_replication_driver_data',
|
||||
mock.Mock(return_value={'pair_id': '1'})
|
||||
],
|
||||
)
|
||||
@ddt.unpack
|
||||
def test_replication_failover_fail(self,
|
||||
mock_module, mock_func, mock_value):
|
||||
self.mock_object(
|
||||
replication.ReplicaCommonDriver,
|
||||
'wait_second_access',
|
||||
def test_failover_replica_volumes(self):
|
||||
driver = FakeISCSIStorage(configuration=self.configuration)
|
||||
driver.do_setup()
|
||||
old_client = driver.client
|
||||
old_replica_client = driver.replica_client
|
||||
old_replica = driver.replica
|
||||
self.mock_object(replication.ReplicaCommonDriver, 'failover')
|
||||
self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params',
|
||||
mock.Mock(
|
||||
side_effect=exception.VolumeBackendAPIException(data="error")))
|
||||
self.mock_object(mock_module, mock_func, mock_value)
|
||||
self.mock_object(huawei_utils.time, 'time', mock.Mock(
|
||||
side_effect = utils.generate_timeout_series(
|
||||
constants.DEFAULT_REPLICA_WAIT_TIMEOUT)))
|
||||
return_value={'replication_enabled': 'true'}))
|
||||
secondary_id, volumes_update = driver.failover_host(
|
||||
None, [replication_volume], REPLICA_BACKEND_ID)
|
||||
self.assertEqual(REPLICA_BACKEND_ID, driver.active_backend_id)
|
||||
self.assertTrue(old_client == driver.replica_client)
|
||||
self.assertTrue(old_replica_client == driver.client)
|
||||
self.assertFalse(old_replica == driver.replica)
|
||||
self.assertEqual(REPLICA_BACKEND_ID, secondary_id)
|
||||
self.assertEqual(1, len(volumes_update))
|
||||
v_id = volumes_update[0]['volume_id']
|
||||
v_update = volumes_update[0]['updates']
|
||||
self.assertEqual(replication_volume['id'], v_id)
|
||||
self.assertEqual('1', v_update['provider_location'])
|
||||
self.assertEqual('failed-over', v_update['replication_status'])
|
||||
new_drv_data = {'pair_id': TEST_PAIR_ID,
|
||||
'rmt_lun_id': replication_volume['provider_location']}
|
||||
new_drv_data = replication.to_string(new_drv_data)
|
||||
self.assertEqual(new_drv_data, v_update['replication_driver_data'])
|
||||
|
||||
@ddt.data({}, {'pair_id': TEST_PAIR_ID})
|
||||
def test_failover_replica_volumes_invalid_drv_data(self, mock_drv_data):
|
||||
volume = copy.deepcopy(replication_volume)
|
||||
volume['replication_driver_data'] = replication.to_string(
|
||||
mock_drv_data)
|
||||
driver = FakeISCSIStorage(configuration=self.configuration)
|
||||
driver.do_setup()
|
||||
old_client = driver.client
|
||||
old_replica_client = driver.replica_client
|
||||
old_replica = driver.replica
|
||||
self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params',
|
||||
mock.Mock(
|
||||
return_value={'replication_enabled': 'true'}))
|
||||
secondary_id, volumes_update = driver.failover_host(
|
||||
None, [volume], REPLICA_BACKEND_ID)
|
||||
self.assertTrue(driver.active_backend_id == REPLICA_BACKEND_ID)
|
||||
self.assertTrue(old_client == driver.replica_client)
|
||||
self.assertTrue(old_replica_client == driver.client)
|
||||
self.assertFalse(old_replica == driver.replica)
|
||||
self.assertEqual(REPLICA_BACKEND_ID, secondary_id)
|
||||
self.assertEqual(1, len(volumes_update))
|
||||
v_id = volumes_update[0]['volume_id']
|
||||
v_update = volumes_update[0]['updates']
|
||||
self.assertEqual(volume['id'], v_id)
|
||||
self.assertEqual('error', v_update['replication_status'])
|
||||
|
||||
def test_failback_replica_volumes(self):
|
||||
self.mock_object(replication.ReplicaCommonDriver, 'enable')
|
||||
self.mock_object(replication.ReplicaCommonDriver, 'wait_replica_ready')
|
||||
self.mock_object(replication.ReplicaCommonDriver, 'failover')
|
||||
self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params',
|
||||
mock.Mock(
|
||||
return_value={'replication_enabled': 'true'}))
|
||||
|
||||
volume = copy.deepcopy(replication_volume)
|
||||
|
||||
driver = FakeISCSIStorage(configuration=self.configuration)
|
||||
driver.active_backend_id = REPLICA_BACKEND_ID
|
||||
driver.do_setup()
|
||||
old_client = driver.client
|
||||
old_replica_client = driver.replica_client
|
||||
old_replica = driver.replica
|
||||
secondary_id, volumes_update = driver.failover_host(
|
||||
None, [volume], 'default')
|
||||
self.assertTrue(driver.active_backend_id in ('', None))
|
||||
self.assertTrue(old_client == driver.replica_client)
|
||||
self.assertTrue(old_replica_client == driver.client)
|
||||
self.assertFalse(old_replica == driver.replica)
|
||||
self.assertEqual('default', secondary_id)
|
||||
self.assertEqual(1, len(volumes_update))
|
||||
v_id = volumes_update[0]['volume_id']
|
||||
v_update = volumes_update[0]['updates']
|
||||
self.assertEqual(replication_volume['id'], v_id)
|
||||
self.assertEqual('1', v_update['provider_location'])
|
||||
self.assertEqual('available', v_update['replication_status'])
|
||||
new_drv_data = {'pair_id': TEST_PAIR_ID,
|
||||
'rmt_lun_id': replication_volume['provider_location']}
|
||||
new_drv_data = replication.to_string(new_drv_data)
|
||||
self.assertEqual(new_drv_data, v_update['replication_driver_data'])
|
||||
|
||||
@ddt.data({}, {'pair_id': TEST_PAIR_ID})
|
||||
def test_failback_replica_volumes_invalid_drv_data(self, mock_drv_data):
|
||||
self.mock_object(huawei_driver.HuaweiBaseDriver, '_get_volume_params',
|
||||
mock.Mock(
|
||||
return_value={'replication_enabled': 'true'}))
|
||||
|
||||
volume = copy.deepcopy(replication_volume)
|
||||
volume['replication_driver_data'] = replication.to_string(
|
||||
mock_drv_data)
|
||||
|
||||
driver = FakeISCSIStorage(configuration=self.configuration)
|
||||
driver.active_backend_id = REPLICA_BACKEND_ID
|
||||
driver.do_setup()
|
||||
old_client = driver.client
|
||||
old_replica_client = driver.replica_client
|
||||
old_replica = driver.replica
|
||||
secondary_id, volumes_update = driver.failover_host(
|
||||
None, [volume], 'default')
|
||||
self.assertTrue(driver.active_backend_id in ('', None))
|
||||
self.assertTrue(old_client == driver.replica_client)
|
||||
self.assertTrue(old_replica_client == driver.client)
|
||||
self.assertFalse(old_replica == driver.replica)
|
||||
self.assertEqual('default', secondary_id)
|
||||
self.assertEqual(1, len(volumes_update))
|
||||
v_id = volumes_update[0]['volume_id']
|
||||
v_update = volumes_update[0]['updates']
|
||||
self.assertEqual(replication_volume['id'], v_id)
|
||||
self.assertEqual('error', v_update['replication_status'])
|
||||
|
||||
@mock.patch.object(replication.PairOp, 'is_primary',
|
||||
side_effect=[False, True])
|
||||
@mock.patch.object(replication.ReplicaCommonDriver, 'split')
|
||||
@mock.patch.object(replication.ReplicaCommonDriver, 'unprotect_second')
|
||||
def test_replication_driver_enable_success(self,
|
||||
mock_unprotect,
|
||||
mock_split,
|
||||
mock_is_primary):
|
||||
replica_id = TEST_PAIR_ID
|
||||
op = replication.PairOp(self.driver.client)
|
||||
common_driver = replication.ReplicaCommonDriver(self.configuration, op)
|
||||
common_driver.enable(replica_id)
|
||||
self.assertTrue(mock_unprotect.called)
|
||||
self.assertTrue(mock_split.called)
|
||||
self.assertTrue(mock_is_primary.called)
|
||||
|
||||
@mock.patch.object(replication.PairOp, 'is_primary', return_value=False)
|
||||
@mock.patch.object(replication.ReplicaCommonDriver, 'split')
|
||||
def test_replication_driver_failover_success(self,
|
||||
mock_split,
|
||||
mock_is_primary):
|
||||
replica_id = TEST_PAIR_ID
|
||||
op = replication.PairOp(self.driver.client)
|
||||
common_driver = replication.ReplicaCommonDriver(self.configuration, op)
|
||||
common_driver.failover(replica_id)
|
||||
self.assertTrue(mock_split.called)
|
||||
self.assertTrue(mock_is_primary.called)
|
||||
|
||||
@mock.patch.object(replication.PairOp, 'is_primary', return_value=True)
|
||||
def test_replication_driver_failover_fail(self, mock_is_primary):
|
||||
replica_id = TEST_PAIR_ID
|
||||
op = replication.PairOp(self.driver.client)
|
||||
common_driver = replication.ReplicaCommonDriver(self.configuration, op)
|
||||
self.assertRaises(
|
||||
exception.VolumeBackendAPIException,
|
||||
self.driver.replication_failover,
|
||||
None,
|
||||
replication_volume, None)
|
||||
|
||||
def test_list_replication_targets(self):
|
||||
info = self.driver.list_replication_targets(None, replication_volume)
|
||||
targets = [{'target_device_id': 'huawei-replica-1'}]
|
||||
self.assertEqual(targets, info['targets'])
|
||||
|
||||
self.mock_object(replication, 'get_replication_driver_data',
|
||||
mock.Mock(return_value={}))
|
||||
info = self.driver.list_replication_targets(None, replication_volume)
|
||||
self.assertEqual(targets, info['targets'])
|
||||
common_driver.failover,
|
||||
replica_id)
|
||||
|
||||
@ddt.data(constants.REPLICA_SECOND_RW, constants.REPLICA_SECOND_RO)
|
||||
def test_replication_protect_second(self, mock_access):
|
||||
def test_replication_driver_protect_second(self, mock_access):
|
||||
replica_id = TEST_PAIR_ID
|
||||
op = replication.PairOp(self.driver.client)
|
||||
common_driver = replication.ReplicaCommonDriver(self.configuration, op)
|
||||
@ -2990,7 +3118,7 @@ class HuaweiISCSIDriverTestCase(test.TestCase):
|
||||
common_driver.protect_second(replica_id)
|
||||
common_driver.unprotect_second(replica_id)
|
||||
|
||||
def test_replication_sync(self):
|
||||
def test_replication_driver_sync(self):
|
||||
replica_id = TEST_PAIR_ID
|
||||
op = replication.PairOp(self.driver.client)
|
||||
common_driver = replication.ReplicaCommonDriver(self.configuration, op)
|
||||
@ -3006,7 +3134,7 @@ class HuaweiISCSIDriverTestCase(test.TestCase):
|
||||
common_driver.sync(replica_id, True)
|
||||
common_driver.sync(replica_id, False)
|
||||
|
||||
def test_replication_split(self):
|
||||
def test_replication_driver_split(self):
|
||||
replica_id = TEST_PAIR_ID
|
||||
op = replication.PairOp(self.driver.client)
|
||||
common_driver = replication.ReplicaCommonDriver(self.configuration, op)
|
||||
|
@ -251,3 +251,37 @@ class HuaweiConf(object):
|
||||
iscsi_info.append(props)
|
||||
|
||||
setattr(self.conf, 'iscsi_info', iscsi_info)
|
||||
|
||||
def get_replication_devices(self):
|
||||
devs = self.conf.safe_get('replication_device')
|
||||
if not devs:
|
||||
return []
|
||||
|
||||
devs_config = []
|
||||
for dev in devs:
|
||||
dev_config = {}
|
||||
dev_config['backend_id'] = dev['backend_id']
|
||||
dev_config['san_address'] = dev['san_address'].split(';')
|
||||
dev_config['san_user'] = dev['san_user']
|
||||
dev_config['san_password'] = dev['san_password']
|
||||
dev_config['storage_pool'] = dev['storage_pool'].split(';')
|
||||
dev_config['iscsi_info'] = []
|
||||
dev_config['iscsi_default_target_ip'] = (
|
||||
dev['iscsi_default_target_ip'].split(';')
|
||||
if 'iscsi_default_target_ip' in dev
|
||||
else [])
|
||||
devs_config.append(dev_config)
|
||||
|
||||
return devs_config
|
||||
|
||||
def get_local_device(self):
|
||||
dev_config = {
|
||||
'backend_id': "default",
|
||||
'san_address': self.conf.san_address,
|
||||
'san_user': self.conf.san_user,
|
||||
'san_password': self.conf.san_password,
|
||||
'storage_pool': self.conf.storage_pools,
|
||||
'iscsi_info': self.conf.iscsi_info,
|
||||
'iscsi_default_target_ip': self.conf.iscsi_default_target_ip,
|
||||
}
|
||||
return dev_config
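A worked illustration (a sketch, not part of the patch) of what get_replication_devices() produces for the sample replication_device entry in the commit message; san_address, storage_pool and iscsi_default_target_ip are split on ';':

# Hypothetical parsed result for the [replica] example above.
devs_config = [{
    'backend_id': 'huawei-replica-1',
    'storage_pool': ['pool_1'],
    'san_address': ['san_url_1', 'san_url_2'],
    'san_user': 'admin',
    'san_password': 'passwd',
    'iscsi_default_target_ip': ['192.168.1.100'],
    'iscsi_info': [],
}]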
|
||||
|
@ -64,20 +64,45 @@ class HuaweiBaseDriver(driver.VolumeDriver):
|
||||
msg = _('Configuration is not found.')
|
||||
raise exception.InvalidInput(reason=msg)
|
||||
|
||||
self.active_backend_id = kwargs.get('active_backend_id')
|
||||
|
||||
self.configuration.append_config_values(huawei_opts)
|
||||
self.huawei_conf = huawei_conf.HuaweiConf(self.configuration)
|
||||
self.metro_flag = False
|
||||
self.replica = None
|
||||
|
||||
def get_local_and_remote_dev_conf(self):
|
||||
self.loc_dev_conf = self.huawei_conf.get_local_device()
|
||||
|
||||
# Currently only one replication_device is supported.
|
||||
replica_devs = self.huawei_conf.get_replication_devices()
|
||||
self.replica_dev_conf = replica_devs[0] if replica_devs else {}
|
||||
|
||||
def get_local_and_remote_client_conf(self):
|
||||
if self.active_backend_id:
|
||||
return self.replica_dev_conf, self.loc_dev_conf
|
||||
else:
|
||||
return self.loc_dev_conf, self.replica_dev_conf
|
||||
|
||||
def do_setup(self, context):
|
||||
"""Instantiate common class and login storage system."""
|
||||
# Set huawei private configuration into Configuration object.
|
||||
self.huawei_conf.update_config_value()
|
||||
|
||||
self.get_local_and_remote_dev_conf()
|
||||
client_conf, replica_client_conf = (
|
||||
self.get_local_and_remote_client_conf())
|
||||
|
||||
# init local client
|
||||
if not client_conf:
|
||||
msg = _('Get active client failed.')
|
||||
LOG.error(msg)
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
|
||||
self.client = rest_client.RestClient(self.configuration,
|
||||
self.configuration.san_address,
|
||||
self.configuration.san_user,
|
||||
self.configuration.san_password)
|
||||
**client_conf)
|
||||
self.client.login()
|
||||
|
||||
# init remote client
|
||||
metro_san_address = self.configuration.safe_get("metro_san_address")
|
||||
metro_san_user = self.configuration.safe_get("metro_san_user")
|
||||
@ -92,7 +117,12 @@ class HuaweiBaseDriver(driver.VolumeDriver):
|
||||
self.rmt_client.login()
|
||||
|
||||
# init replication manager
|
||||
if replica_client_conf:
|
||||
self.replica_client = rest_client.RestClient(self.configuration,
|
||||
**replica_client_conf)
|
||||
self.replica_client.try_login()
|
||||
self.replica = replication.ReplicaPairManager(self.client,
|
||||
self.replica_client,
|
||||
self.configuration)
|
||||
|
||||
def check_for_setup_error(self):
|
||||
@ -103,8 +133,15 @@ class HuaweiBaseDriver(driver.VolumeDriver):
|
||||
self.huawei_conf.update_config_value()
|
||||
if self.metro_flag:
|
||||
self.rmt_client.get_all_pools()
|
||||
|
||||
stats = self.client.update_volume_stats()
|
||||
|
||||
if self.replica:
|
||||
stats = self.replica.update_replica_capability(stats)
|
||||
targets = [self.replica_dev_conf['backend_id']]
|
||||
stats['replication_targets'] = targets
|
||||
stats['replication_enabled'] = True
|
||||
|
||||
return stats
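Illustrative only: with a replica device configured, the capability flags reported by the driver would look like the sketch below (values taken from the sample configuration; not actual driver output).

# Hypothetical replication capabilities added to the backend stats:
stats['replication_enabled'] = True
stats['replication_targets'] = ['huawei-replica-1']
# ReplicaPairManager.update_replica_capability() also walks stats['pools']
# to set a per-pool replication capability when the remote device is reachable.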
|
||||
|
||||
def _get_volume_type(self, volume):
|
||||
@ -1393,21 +1430,109 @@ class HuaweiBaseDriver(driver.VolumeDriver):
|
||||
{'snapshot_id': snapshot['id'],
|
||||
'snapshot_name': snapshot_name})
|
||||
|
||||
def replication_enable(self, context, volume):
|
||||
"""Enable replication and do switch role when needed."""
|
||||
self.replica.enable_replica(volume)
|
||||
def _classify_volume(self, volumes):
|
||||
normal_volumes = []
|
||||
replica_volumes = []
|
||||
|
||||
def replication_disable(self, context, volume):
|
||||
"""Disable replication."""
|
||||
self.replica.disable_replica(volume)
|
||||
for v in volumes:
|
||||
volume_type = self._get_volume_type(v)
|
||||
opts = self._get_volume_params(volume_type)
|
||||
if opts.get('replication_enabled') == 'true':
|
||||
replica_volumes.append(v)
|
||||
else:
|
||||
normal_volumes.append(v)
|
||||
|
||||
def replication_failover(self, context, volume, secondary):
|
||||
"""Disable replication and unprotect remote LUN."""
|
||||
return self.replica.failover_replica(volume)
|
||||
return normal_volumes, replica_volumes
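For context, an illustrative sketch (not in the patch): _classify_volume splits the volumes passed to failover_host by the replication_enabled extra spec, so only replicated volumes go through the pair operations.

# Hypothetical opts returned by _get_volume_params for a replicated volume type:
opts = {'replication_enabled': 'true'}
# -> such a volume is appended to replica_volumes and handled by
#    self.replica.failover()/failback(); every other volume lands in
#    normal_volumes and is only marked 'error' on failover, then restored
#    from metadata['old_status'] on failback.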
|
||||
|
||||
def list_replication_targets(self, context, vref):
|
||||
"""Obtain volume repliction targets."""
|
||||
return self.replica.list_replica_targets(vref)
|
||||
def _failback_normal_volumes(self, volumes):
|
||||
volumes_update = []
|
||||
for v in volumes:
|
||||
v_update = {}
|
||||
v_update['volume_id'] = v['id']
|
||||
metadata = huawei_utils.get_volume_metadata(v)
|
||||
old_status = 'available'
|
||||
if 'old_status' in metadata:
|
||||
old_status = metadata['old_status']
|
||||
del metadata['old_status']
|
||||
v_update['updates'] = {'status': old_status,
|
||||
'metadata': metadata}
|
||||
volumes_update.append(v_update)
|
||||
|
||||
return volumes_update
|
||||
|
||||
def _failback(self, volumes):
|
||||
if self.active_backend_id in ('', None):
|
||||
return 'default', []
|
||||
|
||||
normal_volumes, replica_volumes = self._classify_volume(volumes)
|
||||
volumes_update = []
|
||||
|
||||
replica_volumes_update = self.replica.failback(replica_volumes)
|
||||
volumes_update.extend(replica_volumes_update)
|
||||
|
||||
normal_volumes_update = self._failback_normal_volumes(normal_volumes)
|
||||
volumes_update.extend(normal_volumes_update)
|
||||
|
||||
self.active_backend_id = ""
|
||||
secondary_id = 'default'
|
||||
|
||||
# Switch array connection.
|
||||
self.client, self.replica_client = self.replica_client, self.client
|
||||
self.replica = replication.ReplicaPairManager(self.client,
|
||||
self.replica_client,
|
||||
self.configuration)
|
||||
return secondary_id, volumes_update
|
||||
|
||||
def _failover_normal_volumes(self, volumes):
|
||||
volumes_update = []
|
||||
|
||||
for v in volumes:
|
||||
v_update = {}
|
||||
v_update['volume_id'] = v['id']
|
||||
metadata = huawei_utils.get_volume_metadata(v)
|
||||
metadata.update({'old_status': v['status']})
|
||||
v_update['updates'] = {'status': 'error',
|
||||
'metadata': metadata}
|
||||
volumes_update.append(v_update)
|
||||
|
||||
return volumes_update
|
||||
|
||||
def _failover(self, volumes):
|
||||
if self.active_backend_id not in ('', None):
|
||||
return self.replica_dev_conf['backend_id'], []
|
||||
|
||||
normal_volumes, replica_volumes = self._classify_volume(volumes)
|
||||
volumes_update = []
|
||||
|
||||
replica_volumes_update = self.replica.failover(replica_volumes)
|
||||
volumes_update.extend(replica_volumes_update)
|
||||
|
||||
normal_volumes_update = self._failover_normal_volumes(normal_volumes)
|
||||
volumes_update.extend(normal_volumes_update)
|
||||
|
||||
self.active_backend_id = self.replica_dev_conf['backend_id']
|
||||
secondary_id = self.active_backend_id
|
||||
|
||||
# Switch array connection.
|
||||
self.client, self.replica_client = self.replica_client, self.client
|
||||
self.replica = replication.ReplicaPairManager(self.client,
|
||||
self.replica_client,
|
||||
self.configuration)
|
||||
return secondary_id, volumes_update
|
||||
|
||||
def failover_host(self, context, volumes, secondary_id=None):
|
||||
"""Failover all volumes to secondary."""
|
||||
if secondary_id == 'default':
|
||||
secondary_id, volumes_update = self._failback(volumes)
|
||||
elif (secondary_id == self.replica_dev_conf['backend_id']
|
||||
or secondary_id is None):
|
||||
secondary_id, volumes_update = self._failover(volumes)
|
||||
else:
|
||||
msg = _("Invalid secondary id %s.") % secondary_id
|
||||
LOG.error(msg)
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
|
||||
return secondary_id, volumes_update
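To make the dispatch concrete, a small sketch (assumed call sites, consistent with the tests in this patch):

# Fail over to the configured replica (passing None selects the same target):
driver.failover_host(ctxt, volumes, 'huawei-replica-1')
# Fail back to the primary backend:
driver.failover_host(ctxt, volumes, 'default')
# Any other id raises VolumeBackendAPIException. A real fail over or fail back
# also swaps self.client with self.replica_client and rebuilds self.replica.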
|
||||
|
||||
|
||||
class HuaweiISCSIDriver(HuaweiBaseDriver, driver.ISCSIDriver):
|
||||
|
@ -15,7 +15,6 @@
|
||||
#
|
||||
|
||||
import json
|
||||
import re
|
||||
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import excutils
|
||||
@ -24,8 +23,6 @@ from cinder import exception
|
||||
from cinder.i18n import _, _LW, _LE
|
||||
from cinder.volume.drivers.huawei import constants
|
||||
from cinder.volume.drivers.huawei import huawei_utils
|
||||
from cinder.volume.drivers.huawei import rest_client
|
||||
from cinder.volume import utils as volume_utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
@ -213,10 +210,6 @@ class ReplicaCommonDriver(object):
|
||||
self.sync(replica_id)
|
||||
return None
|
||||
|
||||
def disable(self, replica_id):
|
||||
self.split(replica_id)
|
||||
return None
|
||||
|
||||
def switch(self, replica_id):
|
||||
self.split(replica_id)
|
||||
self.unprotect_second(replica_id)
|
||||
@ -332,33 +325,19 @@ def to_string(dict_data):
|
||||
|
||||
|
||||
class ReplicaPairManager(object):
|
||||
def __init__(self, local_client, conf):
|
||||
def __init__(self, local_client, rmt_client, conf):
|
||||
self.local_client = local_client
|
||||
self.rmt_client = rmt_client
|
||||
self.conf = conf
|
||||
self.replica_device = self.conf.safe_get('replication_device')
|
||||
if not self.replica_device:
|
||||
return
|
||||
|
||||
# managed_backed_name format: host_name@backend_name#pool_name
|
||||
self.rmt_backend = self.replica_device[0]['managed_backend_name']
|
||||
self.rmt_pool = volume_utils.extract_host(self.rmt_backend,
|
||||
level='pool')
|
||||
self.target_dev_id = self.replica_device[0]['target_device_id']
|
||||
# Now just support one remote pool.
|
||||
self.rmt_pool = self.rmt_client.storage_pools[0]
|
||||
|
||||
self._init_rmt_client()
|
||||
self.local_op = PairOp(self.local_client)
|
||||
self.local_driver = ReplicaCommonDriver(self.conf, self.local_op)
|
||||
self.rmt_op = PairOp(self.rmt_client)
|
||||
self.rmt_driver = ReplicaCommonDriver(self.conf, self.rmt_op)
|
||||
|
||||
self.try_login_remote_array()
|
||||
|
||||
def try_login_remote_array(self):
|
||||
try:
|
||||
self.rmt_client.login()
|
||||
except Exception as err:
|
||||
LOG.warning(_LW('Remote array login failed. Error: %s.'), err)
|
||||
|
||||
def try_get_remote_wwn(self):
|
||||
try:
|
||||
info = self.rmt_client.get_array_info()
|
||||
@ -381,9 +360,6 @@ class ReplicaPairManager(object):
|
||||
return {}
|
||||
|
||||
def check_remote_available(self):
|
||||
if not self.replica_device:
|
||||
return False
|
||||
|
||||
# We get device wwn in every check time.
|
||||
# If remote array changed, we can run normally.
|
||||
wwn = self.try_get_remote_wwn()
|
||||
@ -404,10 +380,7 @@ class ReplicaPairManager(object):
|
||||
def update_replica_capability(self, stats):
|
||||
is_rmt_dev_available = self.check_remote_available()
|
||||
if not is_rmt_dev_available:
|
||||
if self.replica_device:
|
||||
LOG.warning(_LW('Remote device is unavailable. '
|
||||
'Remote backend: %s.'),
|
||||
self.rmt_backend)
|
||||
LOG.warning(_LW('Remote device is unavailable.'))
|
||||
return stats
|
||||
|
||||
for pool in stats['pools']:
|
||||
@ -416,17 +389,6 @@ class ReplicaPairManager(object):
|
||||
|
||||
return stats
|
||||
|
||||
def _init_rmt_client(self):
|
||||
# Multiple addresses support.
|
||||
rmt_addrs = self.replica_device[0]['san_address'].split(';')
|
||||
rmt_addrs = list(set([x.strip() for x in rmt_addrs if x.strip()]))
|
||||
rmt_user = self.replica_device[0]['san_user']
|
||||
rmt_password = self.replica_device[0]['san_password']
|
||||
self.rmt_client = rest_client.RestClient(self.conf,
|
||||
rmt_addrs,
|
||||
rmt_user,
|
||||
rmt_password)
|
||||
|
||||
def get_rmt_dev_info(self):
|
||||
wwn = self.try_get_remote_wwn()
|
||||
if not wwn:
|
||||
@ -545,7 +507,7 @@ class ReplicaPairManager(object):
|
||||
driver_data = {'pair_id': pair_id,
|
||||
'rmt_lun_id': rmt_lun_id}
|
||||
model_update['replication_driver_data'] = to_string(driver_data)
|
||||
model_update['replication_status'] = 'enabled'
|
||||
model_update['replication_status'] = 'available'
|
||||
LOG.debug('Create replication, return info: %s.', model_update)
|
||||
return model_update
|
||||
|
||||
@ -579,91 +541,102 @@ class ReplicaPairManager(object):
|
||||
if rmt_lun_id:
|
||||
self._delete_rmt_lun(rmt_lun_id)
|
||||
|
||||
def enable_replica(self, volume):
|
||||
"""Enable replication.
|
||||
def failback(self, volumes):
|
||||
"""Failover volumes back to primary backend.
|
||||
|
||||
Purpose:
|
||||
1. If local backend's array is secondary, switch to primary
|
||||
2. Synchronize data
|
||||
The main steps:
|
||||
1. Switch the role of replication pairs.
|
||||
2. Copy the second LUN data back to primary LUN.
|
||||
3. Split replication pairs.
|
||||
4. Switch the role of replication pairs.
|
||||
5. Enable replications.
|
||||
"""
|
||||
LOG.debug('Enable replication, volume: %s.', volume['id'])
|
||||
|
||||
info = get_replication_driver_data(volume)
|
||||
pair_id = info.get('pair_id')
|
||||
volumes_update = []
|
||||
for v in volumes:
|
||||
v_update = {}
|
||||
v_update['volume_id'] = v['id']
|
||||
drv_data = get_replication_driver_data(v)
|
||||
pair_id = drv_data.get('pair_id')
|
||||
if not pair_id:
|
||||
msg = _('No pair id in volume replication_driver_data.')
|
||||
LOG.error(msg)
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
LOG.warning(_LW("No pair id in volume %s."), v['id'])
|
||||
v_update['updates'] = {'replication_status': 'error'}
|
||||
volumes_update.append(v_update)
|
||||
continue
|
||||
|
||||
info = self.local_op.get_replica_info(pair_id)
|
||||
if not info:
|
||||
msg = _('Pair does not exist on array. Pair id: %s.') % pair_id
|
||||
LOG.error(msg)
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
|
||||
wait_sync_complete = False
|
||||
if info.get('REPLICATIONMODEL') == constants.REPLICA_SYNC_MODEL:
|
||||
wait_sync_complete = True
|
||||
|
||||
return self.local_driver.enable(pair_id, wait_sync_complete)
|
||||
|
||||
def disable_replica(self, volume):
|
||||
"""We consider that all abnormal states is disabled."""
|
||||
LOG.debug('Disable replication, volume: %s.', volume['id'])
|
||||
|
||||
info = get_replication_driver_data(volume)
|
||||
pair_id = info.get('pair_id')
|
||||
if not pair_id:
|
||||
LOG.warning(_LW('No pair id in volume replication_driver_data.'))
|
||||
return None
|
||||
|
||||
return self.local_driver.disable(pair_id)
|
||||
|
||||
def failover_replica(self, volume):
|
||||
"""Just make the secondary available."""
|
||||
LOG.debug('Failover replication, volume: %s.', volume['id'])
|
||||
|
||||
info = get_replication_driver_data(volume)
|
||||
pair_id = info.get('pair_id')
|
||||
if not pair_id:
|
||||
msg = _('No pair id in volume replication_driver_data.')
|
||||
LOG.error(msg)
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
|
||||
rmt_lun_id = info.get('rmt_lun_id')
|
||||
rmt_lun_id = drv_data.get('rmt_lun_id')
|
||||
if not rmt_lun_id:
|
||||
msg = _('No remote LUN id in volume replication_driver_data.')
|
||||
LOG.error(msg)
|
||||
raise exception.VolumeBackendAPIException(data=msg)
|
||||
LOG.warning(_LW("No remote lun id in volume %s."), v['id'])
|
||||
v_update['updates'] = {'replication_status': 'error'}
|
||||
volumes_update.append(v_update)
|
||||
continue
|
||||
|
||||
# Switch replication pair role, and start synchronize.
|
||||
self.local_driver.enable(pair_id)
|
||||
|
||||
# Wait for synchronize complete.
|
||||
self.local_driver.wait_replica_ready(pair_id)
|
||||
|
||||
# Split replication pair again
|
||||
self.rmt_driver.failover(pair_id)
|
||||
|
||||
# Switch replication pair role, and start synchronize.
|
||||
self.rmt_driver.enable(pair_id)
|
||||
|
||||
# Remote array must be available. So we can get the real pool info.
|
||||
lun_info = self.rmt_client.get_lun_info(rmt_lun_id)
|
||||
lun_wwn = lun_info.get('WWN')
|
||||
lun_pool = lun_info.get('PARENTNAME')
|
||||
new_backend = re.sub(r'(?<=#).*$', lun_pool, self.rmt_backend)
|
||||
metadata = huawei_utils.get_volume_metadata(v)
|
||||
metadata.update({'lun_wwn': lun_wwn})
|
||||
new_drv_data = {'pair_id': pair_id,
|
||||
'rmt_lun_id': v['provider_location']}
|
||||
new_drv_data = to_string(new_drv_data)
|
||||
v_update['updates'] = {'provider_location': rmt_lun_id,
|
||||
'replication_status': 'available',
|
||||
'replication_driver_data': new_drv_data,
|
||||
'metadata': metadata}
|
||||
volumes_update.append(v_update)
|
||||
|
||||
return volumes_update
|
||||
|
||||
def failover(self, volumes):
|
||||
"""Failover volumes back to secondary array.
|
||||
|
||||
Split the replication pairs and make the secondary LUNs R&W.
|
||||
"""
|
||||
volumes_update = []
|
||||
for v in volumes:
|
||||
v_update = {}
|
||||
v_update['volume_id'] = v['id']
|
||||
drv_data = get_replication_driver_data(v)
|
||||
pair_id = drv_data.get('pair_id')
|
||||
if not pair_id:
|
||||
LOG.warning(_LW("No pair id in volume %s."), v['id'])
|
||||
v_update['updates'] = {'replication_status': 'error'}
|
||||
volumes_update.append(v_update)
|
||||
continue
|
||||
|
||||
rmt_lun_id = drv_data.get('rmt_lun_id')
|
||||
if not rmt_lun_id:
|
||||
LOG.warning(_LW("No remote lun id in volume %s."), v['id'])
|
||||
v_update['updates'] = {'replication_status': 'error'}
|
||||
volumes_update.append(v_update)
|
||||
continue
|
||||
|
||||
self.rmt_driver.failover(pair_id)
|
||||
|
||||
metadata = huawei_utils.get_volume_metadata(volume)
|
||||
lun_info = self.rmt_client.get_lun_info(rmt_lun_id)
|
||||
lun_wwn = lun_info.get('WWN')
|
||||
metadata = huawei_utils.get_volume_metadata(v)
|
||||
metadata.update({'lun_wwn': lun_wwn})
|
||||
|
||||
new_driver_data = {'pair_id': pair_id,
|
||||
'rmt_lun_id': volume['provider_location']}
|
||||
new_driver_data = to_string(new_driver_data)
|
||||
return {'host': new_backend,
|
||||
'provider_location': rmt_lun_id,
|
||||
'replication_driver_data': new_driver_data,
|
||||
new_drv_data = {'pair_id': pair_id,
|
||||
'rmt_lun_id': v['provider_location']}
|
||||
new_drv_data = to_string(new_drv_data)
|
||||
v_update['updates'] = {'provider_location': rmt_lun_id,
|
||||
'replication_status': 'failed-over',
|
||||
'replication_driver_data': new_drv_data,
|
||||
'metadata': metadata}
|
||||
volumes_update.append(v_update)
|
||||
|
||||
def list_replica_targets(self, volume):
|
||||
info = get_replication_driver_data(volume)
|
||||
if not info:
|
||||
LOG.warning(_LW('Replication driver data does not exist. '
|
||||
'Volume: %s'), volume['id'])
|
||||
|
||||
targets = [{'target_device_id': self.target_dev_id}]
|
||||
return {'volume_id': volume['id'],
|
||||
'targets': targets}
|
||||
return volumes_update
|
||||
|
||||
|
||||
def get_replication_opts(opts):
|
||||
|
@ -36,12 +36,20 @@ LOG = logging.getLogger(__name__)
|
||||
class RestClient(object):
|
||||
"""Common class for Huawei OceanStor storage system."""
|
||||
|
||||
def __init__(self, configuration, san_address, san_user, san_password):
|
||||
def __init__(self, configuration, san_address, san_user, san_password,
|
||||
**kwargs):
|
||||
self.configuration = configuration
|
||||
self.san_address = san_address
|
||||
self.san_user = san_user
|
||||
self.san_password = san_password
|
||||
self.init_http_head()
|
||||
self.storage_pools = kwargs.get('storage_pools',
|
||||
self.configuration.storage_pools)
|
||||
self.iscsi_info = kwargs.get('iscsi_info',
|
||||
self.configuration.iscsi_info)
|
||||
self.iscsi_default_target_ip = kwargs.get(
|
||||
'iscsi_default_target_ip',
|
||||
self.configuration.iscsi_default_target_ip)
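A brief sketch (assumptions marked) of how the per-device settings reach RestClient through the new keyword arguments; keys missing from the dict fall back to the values on the Configuration object:

# Hypothetical: replica_client_conf comes from get_replication_devices()[0].
replica_client = RestClient(configuration, **replica_client_conf)
# e.g. replica_client.iscsi_default_target_ip == ['192.168.1.100'], while
# replica_client.storage_pools falls back to configuration.storage_pools when
# the replica entry does not supply a 'storage_pools' key.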
|
||||
|
||||
def init_http_head(self):
|
||||
self.cookie = http_cookiejar.CookieJar()
|
||||
@ -132,6 +140,12 @@ class RestClient(object):
|
||||
|
||||
return device_id
|
||||
|
||||
def try_login(self):
|
||||
try:
|
||||
self.login()
|
||||
except Exception as err:
|
||||
LOG.warning(_LW('Login failed. Error: %s.'), err)
|
||||
|
||||
@utils.synchronized('huawei_cinder_call')
|
||||
def call(self, url, data=None, method=None):
|
||||
"""Send requests to server.
|
||||
@ -240,7 +254,7 @@ class RestClient(object):
|
||||
if not pool_info:
|
||||
# The following code is to keep compatibility with old version of
|
||||
# Huawei driver.
|
||||
for pool_name in self.configuration.storage_pools:
|
||||
for pool_name in self.storage_pools:
|
||||
pool_info = self.get_pool_info(pool_name, pools)
|
||||
if pool_info:
|
||||
break
|
||||
@ -762,9 +776,9 @@ class RestClient(object):
|
||||
initiator_name,
|
||||
host_id):
|
||||
"""Associate initiator with the host."""
|
||||
chapinfo = self.find_chap_info(self.configuration.iscsi_info,
|
||||
chapinfo = self.find_chap_info(self.iscsi_info,
|
||||
initiator_name)
|
||||
multipath_type = self._find_alua_info(self.configuration.iscsi_info,
|
||||
multipath_type = self._find_alua_info(self.iscsi_info,
|
||||
initiator_name)
|
||||
if chapinfo:
|
||||
LOG.info(_LI('Use CHAP when adding initiator to host.'))
|
||||
@ -1118,7 +1132,7 @@ class RestClient(object):
|
||||
data = {}
|
||||
data['pools'] = []
|
||||
result = self.get_all_pools()
|
||||
for pool_name in self.configuration.storage_pools:
|
||||
for pool_name in self.storage_pools:
|
||||
capacity = self._get_capacity(pool_name, result)
|
||||
pool = {}
|
||||
pool.update(dict(
|
||||
@ -1198,7 +1212,7 @@ class RestClient(object):
|
||||
target_iqns = []
|
||||
portgroup = None
|
||||
portgroup_id = None
|
||||
for ini in self.configuration.iscsi_info:
|
||||
for ini in self.iscsi_info:
|
||||
if ini['Name'] == initiator:
|
||||
for key in ini:
|
||||
if key == 'TargetPortGroup':
|
||||
@ -1212,7 +1226,7 @@ class RestClient(object):
|
||||
|
||||
# If not specify target IP for some initiators, use default IP.
|
||||
if not target_ips:
|
||||
default_target_ips = self.configuration.iscsi_default_target_ip
|
||||
default_target_ips = self.iscsi_default_target_ip
|
||||
if default_target_ips:
|
||||
target_ips.append(default_target_ips[0])
|
||||
|
||||
|
@ -0,0 +1,3 @@
|
||||
---
|
||||
features:
|
||||
- Added v2.1 replication support in Huawei Cinder driver.
|