diff --git a/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py b/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py
index 32139f63d02..cf34d2ceb29 100644
--- a/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py
+++ b/cinder/tests/unit/volume/drivers/ibm/test_storwize_svc.py
@@ -6243,7 +6243,6 @@ class StorwizeSVCReplicationTestCase(test.TestCase):
     def _create_test_volume(self, rep_type):
         volume = self._generate_vol_info(rep_type)
         model_update = self.driver.create_volume(volume)
-        volume['status'] = 'available'
         return volume, model_update
 
     def _get_vdisk_uid(self, vdisk_name):
@@ -6966,10 +6965,13 @@ class StorwizeSVCReplicationTestCase(test.TestCase):
                                                [self.rep_target])
         self.driver.do_setup(self.ctxt)
 
-        # Create metro mirror replication.
+        # Create replication volume.
         mm_vol, model_update = self._create_test_volume(self.mm_type)
         self.assertEqual('enabled', model_update['replication_status'])
         mm_vol['status'] = 'in-use'
+        gm_vol, model_update = self._create_test_volume(self.gm_type)
+        self.assertEqual('enabled', model_update['replication_status'])
+        gm_vol['status'] = 'available'
 
         # Create non-replication volume.
         non_replica_vol1, model_update = self._create_test_volume(
@@ -6981,12 +6983,14 @@ class StorwizeSVCReplicationTestCase(test.TestCase):
         non_replica_vol1['status'] = 'error'
         non_replica_vol2['status'] = 'available'
 
-        volumes = [mm_vol, non_replica_vol1, non_replica_vol2]
+        volumes = [mm_vol, non_replica_vol1, non_replica_vol2, gm_vol]
 
         rep_data0 = json.dumps({'previous_status': mm_vol['status']})
         rep_data1 = json.dumps({'previous_status': non_replica_vol1['status']})
         rep_data2 = json.dumps({'previous_status': non_replica_vol2['status']})
-        failover_expect = [{'updates': {'status': 'error',
+        failover_expect = [{'updates': {'replication_status': 'failed-over'},
+                            'volume_id': gm_vol['id']},
+                           {'updates': {'status': 'error',
                                         'replication_driver_data': rep_data0},
                             'volume_id': mm_vol['id']},
                            {'updates': {'status': 'error',
@@ -6995,15 +6999,6 @@ class StorwizeSVCReplicationTestCase(test.TestCase):
                            {'updates': {'status': 'error',
                                         'replication_driver_data': rep_data2},
                             'volume_id': non_replica_vol2['id']}]
-        failback_expect = [{'updates': {'status': 'in-use',
-                                        'replication_driver_data': ''},
-                            'volume_id': mm_vol['id']},
-                           {'updates': {'status': 'error',
-                                        'replication_driver_data': ''},
-                            'volume_id': non_replica_vol1['id']},
-                           {'updates': {'status': 'available',
-                                        'replication_driver_data': ''},
-                            'volume_id': non_replica_vol2['id']}]
         # Already failback
         target_id, volume_list = self.driver.failover_host(
             self.ctxt, volumes, 'default')
@@ -7025,6 +7020,20 @@ class StorwizeSVCReplicationTestCase(test.TestCase):
             {'previous_status': 'error'})
         non_replica_vol2['replication_driver_data'] = json.dumps(
             {'previous_status': 'available'})
+        gm_vol['status'] = 'in-use'
+        rep_data3 = json.dumps({'previous_status': gm_vol['status']})
+        failback_expect = [{'updates': {'status': 'in-use',
+                                        'replication_driver_data': ''},
+                            'volume_id': mm_vol['id']},
+                           {'updates': {'status': 'error',
+                                        'replication_driver_data': ''},
+                            'volume_id': non_replica_vol1['id']},
+                           {'updates': {'status': 'available',
+                                        'replication_driver_data': ''},
+                            'volume_id': non_replica_vol2['id']},
+                           {'updates': {'status': 'error',
+                                        'replication_driver_data': rep_data3},
+                            'volume_id': gm_vol['id']}]
         target_id, volume_list = self.driver.failover_host(
             self.ctxt, volumes, 'default')
         self.assertEqual('default', target_id)
@@ -7071,6 +7080,30 @@ class StorwizeSVCReplicationTestCase(test.TestCase):
         self.assertIsNotNone(partner_info)
         self.assertEqual(partner_info['name'], source_system_name)
 
+    @mock.patch.object(storwize_svc_common.StorwizeHelpers,
+                       'get_partnership_info')
+    @mock.patch.object(storwize_svc_common.StorwizeHelpers,
+                       'chpartnership')
+    def test_start_partnership(self, chpartnership, get_partnership_info):
+        get_partnership_info.side_effect = [
+            None,
+            {'partnership': 'fully_configured',
+             'id': '0'},
+            {'partnership': 'fully_configured_stopped',
+             'id': '0'}]
+
+        rep_mgr = self.driver._get_replica_mgr()
+        rep_mgr._partnership_start(rep_mgr._master_helpers,
+                                   'storwize-svc-sim')
+        self.assertFalse(chpartnership.called)
+        rep_mgr._partnership_start(rep_mgr._master_helpers,
+                                   'storwize-svc-sim')
+        self.assertFalse(chpartnership.called)
+
+        rep_mgr._partnership_start(rep_mgr._master_helpers,
+                                   'storwize-svc-sim')
+        chpartnership.assert_called_once_with('0')
+
     @mock.patch.object(storwize_svc_common.StorwizeHelpers,
                        'start_relationship')
     def test_sync_replica_volumes_with_aux(self, start_relationship):
diff --git a/cinder/volume/drivers/ibm/storwize_svc/replication.py b/cinder/volume/drivers/ibm/storwize_svc/replication.py
index 610b5d89a8a..dd9a3c3c2d0 100644
--- a/cinder/volume/drivers/ibm/storwize_svc/replication.py
+++ b/cinder/volume/drivers/ibm/storwize_svc/replication.py
@@ -396,15 +396,25 @@ class StorwizeSVCReplicationManager(object):
                     client.mkfcpartnership(remote_name)
                 else:
                     client.mkippartnership(remote_ip)
-            partnership_info = client.get_partnership_info(remote_name)
-            if partnership_info['partnership'] != 'fully_configured':
-                client.chpartnership(partnership_info['id'])
        except Exception:
            msg = (_('Unable to establish the partnership with '
                     'the Storwize cluster %s.'), remote_name)
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
 
+    def _partnership_start(self, client, remote_name):
+        try:
+            partnership_info = client.get_partnership_info(
+                remote_name)
+            if (partnership_info and
+                    partnership_info['partnership'] != 'fully_configured'):
+                client.chpartnership(partnership_info['id'])
+        except Exception:
+            msg = (_('Unable to start the partnership with '
+                     'the Storwize cluster %s.'), remote_name)
+            LOG.error(msg)
+            raise exception.VolumeDriverException(message=msg)
+
     def establish_target_partnership(self):
         local_system_info = self._master_helpers.get_system_info()
         target_system_info = self.target_helpers.get_system_info()
@@ -419,3 +429,5 @@ class StorwizeSVCReplicationManager(object):
                                           target_system_name, target_ip)
         self._partnership_validate_create(self.target_helpers,
                                           local_system_name, local_ip)
+        self._partnership_start(self._master_helpers, target_system_name)
+        self._partnership_start(self.target_helpers, local_system_name)
diff --git a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py
index 5fa4e064425..c46f69fa444 100644
--- a/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py
+++ b/cinder/volume/drivers/ibm/storwize_svc/storwize_svc_common.py
@@ -397,7 +397,7 @@ class StorwizeSSH(object):
         self.run_ssh_assert_no_output(ssh_cmd)
 
     def mkvdisk(self, name, size, units, pool, opts, params):
-        ssh_cmd = ['svctask', 'mkvdisk', '-name', name, '-mdiskgrp',
+        ssh_cmd = ['svctask', 'mkvdisk', '-name', '"%s"' % name, '-mdiskgrp',
                    '"%s"' % pool, '-iogrp', six.text_type(opts['iogrp']),
                    '-size', size, '-unit', units] + params
         try:
@@ -1281,7 +1281,6 @@ class StorwizeHelpers(object):
         return params
 
     def create_vdisk(self, name, size, units, pool, opts):
-        name = '"%s"' % name
         LOG.debug('Enter: create_vdisk: vdisk %s.', name)
         mdiskgrp = pool
         if opts['mirror_pool']:
@@ -2809,7 +2808,7 @@ class StorwizeSVCCommonDriver(san.SanDriver,
             LOG.error(msg)
             raise exception.UnableToFailOver(reason=msg)
 
-        normal_volumes, rep_volumes = self._classify_volume(ctxt, volumes)
+        unrep_volumes, rep_volumes = self._classify_volume(ctxt, volumes)
 
         # start synchronize from aux volume to master volume
         self._sync_with_aux(ctxt, rep_volumes)
@@ -2819,8 +2818,9 @@ class StorwizeSVCCommonDriver(san.SanDriver,
                                                             rep_volumes)
         volumes_update.extend(rep_volumes_update)
 
-        normal_volumes_update = self._failback_normal_volumes(normal_volumes)
-        volumes_update.extend(normal_volumes_update)
+        unrep_volumes_update = self._failover_unreplicated_volume(
+            unrep_volumes)
+        volumes_update.extend(unrep_volumes_update)
 
         self._helpers = self._master_backend_helpers
         self._active_backend_id = None
@@ -2876,18 +2876,22 @@ class StorwizeSVCCommonDriver(san.SanDriver,
                   {'volumes_update': volumes_update})
         return volumes_update
 
-    def _failback_normal_volumes(self, normal_volumes):
+    def _failover_unreplicated_volume(self, unreplicated_vols):
         volumes_update = []
-        for vol in normal_volumes:
-            pre_status = 'available'
-            if ('replication_driver_data' in vol and
-                    vol['replication_driver_data']):
-                rep_data = json.loads(vol['replication_driver_data'])
-                pre_status = rep_data['previous_status']
+        for vol in unreplicated_vols:
+            if vol.replication_driver_data:
+                rep_data = json.loads(vol.replication_driver_data)
+                update_status = rep_data['previous_status']
+                rep_data = ''
+            else:
+                update_status = 'error'
+                rep_data = json.dumps({'previous_status': vol.status})
+
             volumes_update.append(
-                {'volume_id': vol['id'],
-                 'updates': {'status': pre_status,
-                             'replication_driver_data': ''}})
+                {'volume_id': vol.id,
+                 'updates': {'status': update_status,
+                             'replication_driver_data': rep_data}})
+
         return volumes_update
 
     def _sync_with_aux(self, ctxt, volumes):
@@ -2998,13 +3002,14 @@ class StorwizeSVCCommonDriver(san.SanDriver,
             LOG.error(msg)
             raise exception.UnableToFailOver(reason=msg)
 
-        normal_volumes, rep_volumes = self._classify_volume(ctxt, volumes)
+        unrep_volumes, rep_volumes = self._classify_volume(ctxt, volumes)
 
         rep_volumes_update = self._failover_replica_volumes(ctxt, rep_volumes)
         volumes_update.extend(rep_volumes_update)
 
-        normal_volumes_update = self._failover_normal_volumes(normal_volumes)
-        volumes_update.extend(normal_volumes_update)
+        unrep_volumes_update = self._failover_unreplicated_volume(
+            unrep_volumes)
+        volumes_update.extend(unrep_volumes_update)
 
         self._helpers = self._aux_backend_helpers
         self._active_backend_id = self._replica_target['backend_id']
@@ -3063,19 +3068,6 @@ class StorwizeSVCCommonDriver(san.SanDriver,
                   {'volumes_update': volumes_update})
         return volumes_update
 
-    def _failover_normal_volumes(self, normal_volumes):
-        volumes_update = []
-        for volume in normal_volumes:
-            # If the volume is not of replicated type, we need to
-            # force the status into error state so a user knows they
-            # do not have access to the volume.
-            rep_data = json.dumps({'previous_status': volume['status']})
-            volumes_update.append(
-                {'volume_id': volume['id'],
-                 'updates': {'status': 'error',
-                             'replication_driver_data': rep_data}})
-        return volumes_update
-
     def _classify_volume(self, ctxt, volumes):
         normal_volumes = []
         replica_volumes = []
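
Reviewer note: below is a minimal standalone sketch (not part of the patch) of the status round-trip that the consolidated _failover_unreplicated_volume helper implements in place of the old _failover_normal_volumes/_failback_normal_volumes pair: on failover an unreplicated volume's current status is stashed in replication_driver_data and the volume is forced to 'error'; on failback the stashed status is restored and the driver data cleared. FakeVolume is a hypothetical stand-in for Cinder's volume object, used only for illustration.

import json
from collections import namedtuple

# Hypothetical stand-in for the Cinder volume object (illustration only).
FakeVolume = namedtuple('FakeVolume',
                        ['id', 'status', 'replication_driver_data'])


def failover_unreplicated(vols):
    """Mirror of the decision logic in _failover_unreplicated_volume."""
    updates = []
    for vol in vols:
        if vol.replication_driver_data:
            # Failback path: restore the stashed status, clear driver data.
            update_status = json.loads(
                vol.replication_driver_data)['previous_status']
            rep_data = ''
        else:
            # Failover path: stash the current status, force 'error'.
            update_status = 'error'
            rep_data = json.dumps({'previous_status': vol.status})
        updates.append({'volume_id': vol.id,
                        'updates': {'status': update_status,
                                    'replication_driver_data': rep_data}})
    return updates


# Round trip: failover stashes 'in-use', failback restores it.
failed_over = failover_unreplicated([FakeVolume('v1', 'in-use', '')])[0]
assert failed_over['updates']['status'] == 'error'
failed_back = failover_unreplicated(
    [FakeVolume('v1', 'error',
                failed_over['updates']['replication_driver_data'])])[0]
assert failed_back['updates'] == {'status': 'in-use',
                                  'replication_driver_data': ''}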