From 9a73f5999c4986e29ecf8349ed138346601cfa43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Dulko?= Date: Mon, 19 Sep 2016 12:12:49 +0200 Subject: [PATCH] Remove support for 2.x volume RPC API This commit gets rid of most of our Mitaka compatibility code in volume RPC API. Change-Id: I5606528c8db9a725c6450d084fdcb369db60b49b --- cinder/tests/unit/consistencygroup/test_cg.py | 39 +- .../tests/unit/group/test_groups_manager.py | 39 +- cinder/tests/unit/test_volume.py | 562 ++++++++---------- cinder/tests/unit/test_volume_rpcapi.py | 45 +- .../unit/volume/drivers/test_lvm_driver.py | 2 +- cinder/tests/unit/volume/drivers/test_rbd.py | 11 +- .../tests/unit/volume/test_manage_volume.py | 8 +- cinder/volume/manager.py | 307 +--------- cinder/volume/rpcapi.py | 132 ++-- 9 files changed, 352 insertions(+), 793 deletions(-) diff --git a/cinder/tests/unit/consistencygroup/test_cg.py b/cinder/tests/unit/consistencygroup/test_cg.py index 4b006cbef2e..33795b66aca 100644 --- a/cinder/tests/unit/consistencygroup/test_cg.py +++ b/cinder/tests/unit/consistencygroup/test_cg.py @@ -140,24 +140,22 @@ class ConsistencyGroupTestCase(test_volume.BaseVolumeTestCase): self.context, consistencygroup_id=group.id, **self.volume_params) - volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id) + self.volume.create_volume(self.context, volume) volume2 = tests_utils.create_volume( self.context, consistencygroup_id=None, **self.volume_params) - volume_id2 = volume2['id'] - self.volume.create_volume(self.context, volume_id2) + self.volume.create_volume(self.context, volume2) fake_update_cg.return_value = ( {'status': fields.ConsistencyGroupStatus.AVAILABLE}, - [{'id': volume_id2, 'status': 'available'}], - [{'id': volume_id, 'status': 'available'}]) + [{'id': volume2.id, 'status': 'available'}], + [{'id': volume.id, 'status': 'available'}]) self.volume.update_consistencygroup(self.context, group, - add_volumes=volume_id2, - remove_volumes=volume_id) + add_volumes=volume2.id, + remove_volumes=volume.id) cg = objects.ConsistencyGroup.get_by_id(self.context, group.id) expected = { 'status': fields.ConsistencyGroupStatus.AVAILABLE, @@ -180,9 +178,9 @@ class ConsistencyGroupTestCase(test_volume.BaseVolumeTestCase): cgvolumes = db.volume_get_all_by_group(self.context, group.id) cgvol_ids = [cgvol['id'] for cgvol in cgvolumes] # Verify volume is removed. - self.assertNotIn(volume_id, cgvol_ids) + self.assertNotIn(volume.id, cgvol_ids) # Verify volume is added. 
- self.assertIn(volume_id2, cgvol_ids) + self.assertIn(volume2.id, cgvol_ids) self.volume_params['status'] = 'wrong-status' volume3 = tests_utils.create_volume( @@ -261,7 +259,7 @@ class ConsistencyGroupTestCase(test_volume.BaseVolumeTestCase): consistencygroup_id=group2.id, snapshot_id=snapshot_id, **self.volume_params) - self.volume.create_volume(self.context, volume2.id, volume=volume2) + self.volume.create_volume(self.context, volume2) self.volume.create_consistencygroup_from_src( self.context, group2, cgsnapshot=cgsnapshot) cg2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id) @@ -328,7 +326,7 @@ class ConsistencyGroupTestCase(test_volume.BaseVolumeTestCase): consistencygroup_id=group3.id, source_volid=volume_id, **self.volume_params) - self.volume.create_volume(self.context, volume3.id, volume=volume3) + self.volume.create_volume(self.context, volume3) self.volume.create_consistencygroup_from_src( self.context, group3, source_cg=group) @@ -487,14 +485,13 @@ class ConsistencyGroupTestCase(test_volume.BaseVolumeTestCase): self.context, consistencygroup_id=group.id, **self.volume_params) - volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id) + self.volume.create_volume(self.context, volume) self.assert_notify_called(mock_notify, (['INFO', 'volume.create.start'], ['INFO', 'volume.create.end'])) - cgsnapshot_returns = self._create_cgsnapshot(group.id, [volume_id]) + cgsnapshot_returns = self._create_cgsnapshot(group.id, [volume.id]) cgsnapshot = cgsnapshot_returns[0] self.volume.create_cgsnapshot(self.context, cgsnapshot) self.assertEqual(cgsnapshot.id, @@ -564,7 +561,7 @@ class ConsistencyGroupTestCase(test_volume.BaseVolumeTestCase): status='creating', size=1) self.volume.host = 'host1@backend1' - self.volume.create_volume(self.context, volume.id, volume=volume) + self.volume.create_volume(self.context, volume) self.volume.delete_consistencygroup(self.context, group) cg = objects.ConsistencyGroup.get_by_id( @@ -599,7 +596,7 @@ class ConsistencyGroupTestCase(test_volume.BaseVolumeTestCase): status='creating', size=1) self.volume.host = 'host1@backend2' - self.volume.create_volume(self.context, volume.id, volume=volume) + self.volume.create_volume(self.context, volume) self.assertRaises(exception.InvalidVolume, self.volume.delete_consistencygroup, @@ -656,8 +653,7 @@ class ConsistencyGroupTestCase(test_volume.BaseVolumeTestCase): self.context, consistencygroup_id=group.id, **self.volume_params) - volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id) + self.volume.create_volume(self.context, volume) # Create a bootable volume bootable_vol_params = {'status': 'creating', 'host': CONF.host, 'size': 1, 'bootable': True} @@ -665,10 +661,9 @@ class ConsistencyGroupTestCase(test_volume.BaseVolumeTestCase): consistencygroup_id=group.id, **bootable_vol_params) # Create a common volume - bootable_vol_id = bootable_vol['id'] - self.volume.create_volume(self.context, bootable_vol_id) + self.volume.create_volume(self.context, bootable_vol) - volume_ids = [volume_id, bootable_vol_id] + volume_ids = [volume.id, bootable_vol.id] cgsnapshot_returns = self._create_cgsnapshot(group.id, volume_ids) cgsnapshot = cgsnapshot_returns[0] self.volume.create_cgsnapshot(self.context, cgsnapshot) diff --git a/cinder/tests/unit/group/test_groups_manager.py b/cinder/tests/unit/group/test_groups_manager.py index bc5a8703e6c..6a5a590e6f3 100644 --- a/cinder/tests/unit/group/test_groups_manager.py +++ b/cinder/tests/unit/group/test_groups_manager.py @@ -163,8 
+163,7 @@ class GroupManagerTestCase(test.TestCase):
             volume_type_id=fake.VOLUME_TYPE_ID,
             status='available',
             host=group.host)
-        volume_id = volume['id']
-        self.volume.create_volume(self.context, volume_id)
+        self.volume.create_volume(self.context, volume)
 
         volume2 = tests_utils.create_volume(
             self.context,
@@ -172,17 +171,16 @@ class GroupManagerTestCase(test.TestCase):
             volume_type_id=fake.VOLUME_TYPE_ID,
             status='available',
             host=group.host)
-        volume_id2 = volume2['id']
-        self.volume.create_volume(self.context, volume_id2)
+        self.volume.create_volume(self.context, volume2)
 
         fake_update_grp.return_value = (
             {'status': fields.GroupStatus.AVAILABLE},
-            [{'id': volume_id2, 'status': 'available'}],
-            [{'id': volume_id, 'status': 'available'}])
+            [{'id': volume2.id, 'status': 'available'}],
+            [{'id': volume.id, 'status': 'available'}])
         self.volume.update_group(self.context, group,
-                                 add_volumes=volume_id2,
-                                 remove_volumes=volume_id)
+                                 add_volumes=volume2.id,
+                                 remove_volumes=volume.id)
         grp = objects.Group.get_by_id(self.context, group.id)
         expected = {
             'status': fields.GroupStatus.AVAILABLE,
@@ -206,9 +204,9 @@ class GroupManagerTestCase(test.TestCase):
         grpvolumes = db.volume_get_all_by_generic_group(self.context,
                                                         group.id)
         grpvol_ids = [grpvol['id'] for grpvol in grpvolumes]
         # Verify volume is removed.
-        self.assertNotIn(volume_id, grpvol_ids)
+        self.assertNotIn(volume.id, grpvol_ids)
         # Verify volume is added.
-        self.assertIn(volume_id2, grpvol_ids)
+        self.assertIn(volume2.id, grpvol_ids)
         volume3 = tests_utils.create_volume(
             self.context,
@@ -296,7 +294,7 @@ class GroupManagerTestCase(test.TestCase):
             status='available',
             host=group2.host,
             volume_type_id=fake.VOLUME_TYPE_ID)
-        self.volume.create_volume(self.context, volume2.id, volume=volume2)
+        self.volume.create_volume(self.context, volume2)
         self.volume.create_group_from_src(
             self.context, group2, group_snapshot=group_snapshot)
         grp2 = objects.Group.get_by_id(self.context, group2.id)
@@ -368,7 +366,7 @@ class GroupManagerTestCase(test.TestCase):
             status='available',
             host=group3.host,
             volume_type_id=fake.VOLUME_TYPE_ID)
-        self.volume.create_volume(self.context, volume3.id, volume=volume3)
+        self.volume.create_volume(self.context, volume3)
 
         self.volume.create_group_from_src(
             self.context, group3, source_group=group)
@@ -530,15 +528,14 @@ class GroupManagerTestCase(test.TestCase):
             group_id=group.id,
             host=group.host,
             volume_type_id=fake.VOLUME_TYPE_ID)
-        volume_id = volume['id']
-        self.volume.create_volume(self.context, volume_id)
+        self.volume.create_volume(self.context, volume)
 
         self.assert_notify_called(mock_notify,
                                   (['INFO', 'volume.create.start'],
                                    ['INFO', 'volume.create.end']))
 
         group_snapshot_returns = self._create_group_snapshot(group.id,
-                                                             [volume_id])
+                                                             [volume.id])
         group_snapshot = group_snapshot_returns[0]
         self.volume.create_group_snapshot(self.context, group_snapshot)
         self.assertEqual(group_snapshot.id,
@@ -608,7 +605,7 @@ class GroupManagerTestCase(test.TestCase):
             volume_type_id=fake.VOLUME_TYPE_ID,
             size=1)
         self.volume.host = 'host1@backend1'
-        self.volume.create_volume(self.context, volume.id, volume=volume)
+        self.volume.create_volume(self.context, volume)
 
         self.volume.delete_group(self.context, group)
         grp = objects.Group.get_by_id(
             self.context, group.id)
@@ -643,7 +640,7 @@ class GroupManagerTestCase(test.TestCase):
             volume_type_id=fake.VOLUME_TYPE_ID,
             size=1)
         self.volume.host = 'host1@backend2'
-        self.volume.create_volume(self.context, volume.id, volume=volume)
+        self.volume.create_volume(self.context, volume)
 
         self.assertRaises(exception.InvalidVolume,
self.volume.delete_group, @@ -706,8 +703,7 @@ class GroupManagerTestCase(test.TestCase): group_id=group.id, host=group.host, volume_type_id=fake.VOLUME_TYPE_ID) - volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id) + self.volume.create_volume(self.context, volume) # Create a bootable volume bootable_vol_params = {'status': 'creating', 'host': CONF.host, 'size': 1, 'bootable': True} @@ -715,10 +711,9 @@ class GroupManagerTestCase(test.TestCase): group_id=group.id, **bootable_vol_params) # Create a common volume - bootable_vol_id = bootable_vol['id'] - self.volume.create_volume(self.context, bootable_vol_id) + self.volume.create_volume(self.context, bootable_vol) - volume_ids = [volume_id, bootable_vol_id] + volume_ids = [volume.id, bootable_vol.id] group_snapshot_returns = self._create_group_snapshot(group.id, volume_ids) group_snapshot = group_snapshot_returns[0] diff --git a/cinder/tests/unit/test_volume.py b/cinder/tests/unit/test_volume.py index b06613a1d89..6c4f0ca97b4 100644 --- a/cinder/tests/unit/test_volume.py +++ b/cinder/tests/unit/test_volume.py @@ -227,10 +227,7 @@ class BaseVolumeTestCase(test.TestCase): 'volume_properties': self.volume_params, 'image_id': image_id, } - self.volume.create_volume(self.context, - volume.id, - request_spec, - volume=volume) + self.volume.create_volume(self.context, volume, request_spec) finally: # cleanup os.unlink(dst_path) @@ -336,11 +333,10 @@ class VolumeTestCase(BaseVolumeTestCase): """Test that init_host will unwedge a volume stuck in downloading.""" volume = tests_utils.create_volume(self.context, status='downloading', size=0, host=CONF.host) - volume_id = volume['id'] self.volume.init_host() volume.refresh() self.assertEqual("error", volume.status) - self.volume.delete_volume(self.context, volume_id, volume=volume) + self.volume.delete_volume(self.context, volume) def test_init_host_clears_uploads_available_volume(self): """init_host will clean an available volume stuck in uploading.""" @@ -407,11 +403,11 @@ class VolumeTestCase(BaseVolumeTestCase): vol0.refresh() expected_host = volutils.append_host(CONF.host, 'fake') self.assertEqual(expected_host, vol0.host) - self.volume.delete_volume(self.context, vol0.id, volume=vol0) - self.volume.delete_volume(self.context, vol1.id, volume=vol1) - self.volume.delete_volume(self.context, vol2.id, volume=vol2) - self.volume.delete_volume(self.context, vol3.id, volume=vol3) - self.volume.delete_volume(self.context, vol4.id, volume=vol4) + self.volume.delete_volume(self.context, vol0) + self.volume.delete_volume(self.context, vol1) + self.volume.delete_volume(self.context, vol2) + self.volume.delete_volume(self.context, vol3) + self.volume.delete_volume(self.context, vol4) @mock.patch.object(driver.BaseVD, "update_provider_info") def test_init_host_sync_provider_info(self, mock_update): @@ -444,8 +440,8 @@ class VolumeTestCase(BaseVolumeTestCase): # Clean up self.volume.delete_snapshot(self.context, snap0_obj) self.volume.delete_snapshot(self.context, snap1_obj) - self.volume.delete_volume(self.context, vol0.id) - self.volume.delete_volume(self.context, vol1.id) + self.volume.delete_volume(self.context, vol0) + self.volume.delete_volume(self.context, vol1) @mock.patch.object(driver.BaseVD, "update_provider_info") def test_init_host_sync_provider_info_no_update(self, mock_update): @@ -473,8 +469,8 @@ class VolumeTestCase(BaseVolumeTestCase): # Clean up self.volume.delete_snapshot(self.context, snap0_obj) self.volume.delete_snapshot(self.context, snap1_obj) - 
self.volume.delete_volume(self.context, vol0.id) - self.volume.delete_volume(self.context, vol1.id) + self.volume.delete_volume(self.context, vol0) + self.volume.delete_volume(self.context, vol1) @mock.patch('cinder.volume.manager.VolumeManager.' '_include_resources_in_cluster') @@ -561,11 +557,10 @@ class VolumeTestCase(BaseVolumeTestCase): volume = tests_utils.create_volume(self.context, status=status, size=0, host=CONF.host) - volume_id = volume['id'] self.volume.init_host() volume.refresh() self.assertEqual('error', volume.status) - self.volume.delete_volume(self.context, volume_id, volume=volume) + self.volume.delete_volume(self.context, volume) def test_create_snapshot_fails_with_creating_status(self): """Test init_host in case of snapshot. @@ -588,7 +583,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertEqual(fields.SnapshotStatus.ERROR, snapshot_obj.status) self.volume.delete_snapshot(self.context, snapshot_obj) - self.volume.delete_volume(self.context, volume.id, volume=volume) + self.volume.delete_volume(self.context, volume) @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') @mock.patch.object(QUOTAS, 'reserve') @@ -617,8 +612,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertIsNone(volume['encryption_key_id']) mock_notify.assert_not_called() self.assertRaises(exception.DriverNotInitialized, - self.volume.create_volume, - self.context, volume_id, volume=volume) + self.volume.create_volume, self.context, volume) volume = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual("error", volume.status) @@ -635,10 +629,9 @@ class VolumeTestCase(BaseVolumeTestCase): volume_id = volume['id'] self.assertRaises(exception.DriverNotInitialized, self.volume.create_volume, - self.context, volume_id, + self.context, volume, {'volume_properties': self.volume_params}, - {'retry': {'num_attempts': 1, 'host': []}}, - volume=volume) + {'retry': {'num_attempts': 1, 'host': []}}) # NOTE(dulek): Volume should be rescheduled as we passed request_spec # and filter_properties, assert that it wasn't counted in # allocated_capacity tracking. @@ -659,10 +652,9 @@ class VolumeTestCase(BaseVolumeTestCase): side_effect=processutils.ProcessExecutionError): self.assertRaises(processutils.ProcessExecutionError, self.volume.create_volume, - self.context, volume_id, + self.context, volume, {'volume_properties': params}, - {'retry': {'num_attempts': 1, 'host': []}}, - volume=volume) + {'retry': {'num_attempts': 1, 'host': []}}) # NOTE(dulek): Volume should be rescheduled as we passed request_spec # and filter_properties, assert that it wasn't counted in # allocated_capacity tracking. 
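The hunks above and below show the pattern this patch applies throughout: volume manager entry points stop accepting a volume id plus an optional volume= keyword (a shim kept so Mitaka-era 2.x RPC clients that only sent the id would still work) and take the versioned Volume object directly. A minimal, self-contained sketch of that calling-convention change, using invented stand-ins (FakeVolume, _get_volume, _DB) rather than Cinder's real objects and DB API:

class FakeVolume(object):
    # Stand-in for a versioned Volume object; illustration only.
    def __init__(self, vol_id):
        self.id = vol_id

_DB = {}

def _get_volume(context, volume_id):
    # Stand-in for the re-fetch the manager performed for old callers.
    return _DB[volume_id]

def create_volume_mitaka_compat(context, volume_id, request_spec=None,
                                filter_properties=None, volume=None):
    # Old-style handler: a 2.x client casts only the id, so the manager
    # has to resolve the object itself before doing any work.
    if volume is None:
        volume = _get_volume(context, volume_id)
    return volume

def create_volume_3x(context, volume, request_spec=None,
                     filter_properties=None):
    # New-style handler: the object always arrives over RPC; the compat
    # re-fetch (and its extra DB round trip) disappears.
    return volume

vol = FakeVolume('vol-1')
_DB[vol.id] = vol
assert create_volume_mitaka_compat(None, vol.id) is vol
assert create_volume_3x(None, vol) is vol

Every test updated in this patch collapses the same way: create_volume(self.context, volume.id, volume=volume) becomes create_volume(self.context, volume).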
@@ -696,8 +688,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertIsNone(volume['encryption_key_id']) mock_notify.assert_not_called() self.assertRaises(exception.DriverNotInitialized, - self.volume.delete_volume, - self.context, volume.id, volume=volume) + self.volume.delete_volume, self.context, volume) volume = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual("error_deleting", volume.status) @@ -719,13 +710,13 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertIsNone(volume['encryption_key_id']) - self.volume.create_volume(self.context, volume_id, volume=volume) + self.volume.create_volume(self.context, volume) self.assert_notify_called(mock_notify, (['INFO', 'volume.create.start'], ['INFO', 'volume.create.end'])) - self.volume.delete_volume(self.context, volume_id, volume=volume) + self.volume.delete_volume(self.context, volume) vol = db.volume_get(context.get_admin_context(read_deleted='yes'), volume_id) self.assertEqual(vol['status'], 'deleted') @@ -747,10 +738,10 @@ class VolumeTestCase(BaseVolumeTestCase): volume = tests_utils.create_volume(self.context, metadata=test_meta, **self.volume_params) volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id, volume=volume) + self.volume.create_volume(self.context, volume) self.assertEqual(test_meta, volume.metadata) - self.volume.delete_volume(self.context, volume_id, volume=volume) + self.volume.delete_volume(self.context, volume) self.assertRaises(exception.NotFound, db.volume_get, self.context, @@ -786,7 +777,7 @@ class VolumeTestCase(BaseVolumeTestCase): FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type') volume = tests_utils.create_volume(self.context, metadata=test_meta1, **self.volume_params) - self.volume.create_volume(self.context, volume.id, volume=volume) + self.volume.create_volume(self.context, volume) # update user metadata associated with the volume. result_meta = self.volume_api.update_volume_metadata( self.context, @@ -856,7 +847,7 @@ class VolumeTestCase(BaseVolumeTestCase): volume = tests_utils.create_volume(self.context, metadata=test_meta1, **self.volume_params) volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id, volume=volume) + self.volume.create_volume(self.context, volume) # delete user metadata associated with the volume. 
self.volume_api.delete_volume_metadata( self.context, @@ -1132,7 +1123,7 @@ class VolumeTestCase(BaseVolumeTestCase): volume = tests_utils.create_volume(self.context, **volume_params_with_provider_id) - self.volume.create_volume(self.context, volume['id']) + self.volume.create_volume(self.context, volume) self.assertEqual(fake.PROVIDER_ID, volume['provider_id']) @mock.patch.object(key_manager, 'API', new=fake_keymgr.fake_api) @@ -1205,13 +1196,13 @@ class VolumeTestCase(BaseVolumeTestCase): """Test volume survives deletion if driver reports it as busy.""" volume = tests_utils.create_volume(self.context, **self.volume_params) volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id, volume=volume) + self.volume.create_volume(self.context, volume) with mock.patch.object(self.volume.driver, 'delete_volume', side_effect=exception.VolumeIsBusy( volume_name='fake') ) as mock_del_vol: - self.volume.delete_volume(self.context, volume_id, volume=volume) + self.volume.delete_volume(self.context, volume) volume_ref = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual(volume_id, volume_ref.id) self.assertEqual("available", volume_ref.status) @@ -1222,7 +1213,7 @@ class VolumeTestCase(BaseVolumeTestCase): volume = tests_utils.create_volume(self.context, **self.volume_params) volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id, volume=volume) + self.volume.create_volume(self.context, volume) another_context = context.RequestContext('another_user_id', 'another_project_id', @@ -1237,7 +1228,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertEqual(volume_id, volume_api.get(self.context, volume_id)['id']) - self.volume.delete_volume(self.context, volume_id, volume=volume) + self.volume.delete_volume(self.context, volume) def test_get_all_limit_bad_value(self): """Test value of 'limit' is numeric and >= 0""" @@ -1286,12 +1277,12 @@ class VolumeTestCase(BaseVolumeTestCase): """Test volume can be deleted in error_extending stats.""" # create a volume volume = tests_utils.create_volume(self.context, **self.volume_params) - self.volume.create_volume(self.context, volume.id, volume=volume) + self.volume.create_volume(self.context, volume) # delete 'error_extending' volume db.volume_update(self.context, volume['id'], {'status': 'error_extending'}) - self.volume.delete_volume(self.context, volume.id, volume=volume) + self.volume.delete_volume(self.context, volume) self.assertRaises(exception.NotFound, db.volume_get, self.context, volume['id']) @@ -1302,7 +1293,7 @@ class VolumeTestCase(BaseVolumeTestCase): """Test delete volume moves on if the volume does not exist.""" volume_id = '12345678-1234-5678-1234-567812345678' volume = objects.Volume(self.context, id=volume_id) - self.volume.delete_volume(self.context, volume_id, volume=volume) + self.volume.delete_volume(self.context, volume) self.assertTrue(mock_get_volume.called) @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' 
@@ -1311,18 +1302,15 @@ class VolumeTestCase(BaseVolumeTestCase): """Test volume can be created from a snapshot.""" volume_src = tests_utils.create_volume(self.context, **self.volume_params) - self.volume.create_volume(self.context, volume_src.id, - volume=volume_src) + self.volume.create_volume(self.context, volume_src) snapshot_id = create_snapshot(volume_src['id'], size=volume_src['size'])['id'] snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id) - self.volume.create_snapshot(self.context, volume_src['id'], - snapshot_obj) + self.volume.create_snapshot(self.context, snapshot_obj) volume_dst = tests_utils.create_volume(self.context, snapshot_id=snapshot_id, **self.volume_params) - self.volume.create_volume(self.context, volume_dst.id, - volume=volume_dst) + self.volume.create_volume(self.context, volume_dst) self.assertEqual(volume_dst['id'], db.volume_get( context.get_admin_context(), @@ -1331,11 +1319,9 @@ class VolumeTestCase(BaseVolumeTestCase): db.volume_get(context.get_admin_context(), volume_dst['id']).snapshot_id) - self.volume.delete_volume(self.context, volume_dst.id, - volume=volume_dst) + self.volume.delete_volume(self.context, volume_dst) self.volume.delete_snapshot(self.context, snapshot_obj) - self.volume.delete_volume(self.context, volume_src.id, - volume=volume_src) + self.volume.delete_volume(self.context, volume_src) @mock.patch('cinder.volume.flows.api.create_volume.get_flow') def test_create_volume_from_snapshot_with_types(self, _get_flow): @@ -1568,8 +1554,7 @@ class VolumeTestCase(BaseVolumeTestCase): def test_create_snapshot_driver_not_initialized(self): volume_src = tests_utils.create_volume(self.context, **self.volume_params) - self.volume.create_volume(self.context, volume_src.id, - volume=volume_src) + self.volume.create_volume(self.context, volume_src) snapshot_id = create_snapshot(volume_src['id'], size=volume_src['size'])['id'] snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id) @@ -1578,7 +1563,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertRaises(exception.DriverNotInitialized, self.volume.create_snapshot, - self.context, volume_src['id'], snapshot_obj) + self.context, snapshot_obj) # NOTE(flaper87): The volume status should be error. 
self.assertEqual(fields.SnapshotStatus.ERROR, snapshot_obj.status) @@ -1586,8 +1571,7 @@ class VolumeTestCase(BaseVolumeTestCase): # lets cleanup the mess self.volume.driver._initialized = True self.volume.delete_snapshot(self.context, snapshot_obj) - self.volume.delete_volume(self.context, volume_src.id, - volume=volume_src) + self.volume.delete_volume(self.context, volume_src) def _mock_synchronized(self, name, *s_args, **s_kwargs): def inner_sync1(f): @@ -1618,46 +1602,43 @@ class VolumeTestCase(BaseVolumeTestCase): # create source volume src_vol = tests_utils.create_volume(self.context, **self.volume_params) - src_vol_id = src_vol['id'] # no lock - self.volume.create_volume(self.context, src_vol_id, volume=src_vol) + self.volume.create_volume(self.context, src_vol) - snap_id = create_snapshot(src_vol_id, + snap_id = create_snapshot(src_vol.id, size=src_vol['size'])['id'] snapshot_obj = objects.Snapshot.get_by_id(self.context, snap_id) # no lock - self.volume.create_snapshot(self.context, src_vol_id, snapshot_obj) + self.volume.create_snapshot(self.context, snapshot_obj) dst_vol = tests_utils.create_volume(self.context, snapshot_id=snap_id, **self.volume_params) - dst_vol_id = dst_vol['id'] admin_ctxt = context.get_admin_context() # mock the flow runner so we can do some checks self.mock_object(engine.ActionEngine, 'run', mock_flow_run) # locked - self.volume.create_volume(self.context, volume_id=dst_vol_id, - request_spec={'snapshot_id': snap_id}, - volume=dst_vol) + self.volume.create_volume(self.context, dst_vol, + request_spec={'snapshot_id': snap_id}) mock_lock.assert_called_with('%s-delete_snapshot' % snap_id) - self.assertEqual(dst_vol_id, db.volume_get(admin_ctxt, dst_vol_id).id) + self.assertEqual(dst_vol.id, db.volume_get(admin_ctxt, dst_vol.id).id) self.assertEqual(snap_id, - db.volume_get(admin_ctxt, dst_vol_id).snapshot_id) + db.volume_get(admin_ctxt, dst_vol.id).snapshot_id) # locked - self.volume.delete_volume(self.context, dst_vol_id, volume=dst_vol) - mock_lock.assert_called_with('%s-delete_volume' % dst_vol_id) + self.volume.delete_volume(self.context, dst_vol) + mock_lock.assert_called_with('%s-delete_volume' % dst_vol.id) # locked self.volume.delete_snapshot(self.context, snapshot_obj) mock_lock.assert_called_with('%s-delete_snapshot' % snap_id) # locked - self.volume.delete_volume(self.context, src_vol_id, volume=src_vol) - mock_lock.assert_called_with('%s-delete_volume' % src_vol_id) + self.volume.delete_volume(self.context, src_vol) + mock_lock.assert_called_with('%s-delete_volume' % src_vol.id) self.assertTrue(mock_lvm_create.called) @@ -1680,7 +1661,7 @@ class VolumeTestCase(BaseVolumeTestCase): src_vol_id = src_vol['id'] # no lock - self.volume.create_volume(self.context, src_vol_id, volume=src_vol) + self.volume.create_volume(self.context, src_vol) self.assertEqual(0, mock_lock.call_count) dst_vol = tests_utils.create_volume(self.context, @@ -1693,20 +1674,19 @@ class VolumeTestCase(BaseVolumeTestCase): self.mock_object(engine.ActionEngine, 'run', mock_flow_run) # locked - self.volume.create_volume(self.context, volume_id=dst_vol_id, - request_spec={'source_volid': src_vol_id}, - volume=dst_vol) + self.volume.create_volume(self.context, dst_vol, + request_spec={'source_volid': src_vol_id}) mock_lock.assert_called_with('%s-delete_volume' % src_vol_id) self.assertEqual(dst_vol_id, db.volume_get(admin_ctxt, dst_vol_id).id) self.assertEqual(src_vol_id, db.volume_get(admin_ctxt, dst_vol_id).source_volid) # locked - self.volume.delete_volume(self.context, 
dst_vol_id, volume=dst_vol) + self.volume.delete_volume(self.context, dst_vol) mock_lock.assert_called_with('%s-delete_volume' % dst_vol_id) # locked - self.volume.delete_volume(self.context, src_vol_id, volume=src_vol) + self.volume.delete_volume(self.context, src_vol) mock_lock.assert_called_with('%s-delete_volume' % src_vol_id) def test_create_volume_from_volume_delete_lock_taken(self): @@ -1715,7 +1695,7 @@ class VolumeTestCase(BaseVolumeTestCase): src_vol_id = src_vol['id'] # no lock - self.volume.create_volume(self.context, src_vol_id, volume=src_vol) + self.volume.create_volume(self.context, src_vol) dst_vol = tests_utils.create_volume(self.context, source_volid=src_vol_id, @@ -1732,9 +1712,8 @@ class VolumeTestCase(BaseVolumeTestCase): # we expect this to block and then fail t = eventlet.spawn(self.volume.create_volume, self.context, - volume_id=dst_vol.id, - request_spec={'source_volid': src_vol_id}, - volume=dst_vol) + volume=dst_vol, + request_spec={'source_volid': src_vol_id}) gthreads.append(t) return orig_elevated(*args, **kwargs) @@ -1744,7 +1723,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.mock_object(self.context, 'elevated', mock_elevated) # locked - self.volume.delete_volume(self.context, src_vol_id, volume=src_vol) + self.volume.delete_volume(self.context, src_vol) # we expect the volume create to fail with the following err since the # source volume was deleted while the create was locked. Note that the @@ -1753,7 +1732,7 @@ class VolumeTestCase(BaseVolumeTestCase): with mock.patch('sys.stderr', new=six.StringIO()): self.assertRaises(exception.VolumeNotFound, gthreads[0].wait) - def _raise_metadata_copy_failure(self, method, dst_vol_id, **kwargs): + def _raise_metadata_copy_failure(self, method, dst_vol): # MetadataCopyFailure exception will be raised if DB service is Down # while copying the volume glance metadata with mock.patch.object(db, method) as mock_db: @@ -1762,15 +1741,14 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertRaises(exception.MetadataCopyFailure, self.volume.create_volume, self.context, - dst_vol_id, - **kwargs) + dst_vol) # ensure that status of volume is 'error' - vol = db.volume_get(self.context, dst_vol_id) + vol = db.volume_get(self.context, dst_vol.id) self.assertEqual('error', vol['status']) # cleanup resource - db.volume_destroy(self.context, dst_vol_id) + db.volume_destroy(self.context, dst_vol.id) @mock.patch('cinder.utils.execute') def test_create_volume_from_volume_with_glance_volume_metadata_none( @@ -1780,7 +1758,7 @@ class VolumeTestCase(BaseVolumeTestCase): src_vol = tests_utils.create_volume(self.context, **self.volume_params) src_vol_id = src_vol['id'] - self.volume.create_volume(self.context, src_vol_id, volume=src_vol) + self.volume.create_volume(self.context, src_vol) # set bootable flag of volume to True db.volume_update(self.context, src_vol['id'], {'bootable': True}) @@ -1788,7 +1766,7 @@ class VolumeTestCase(BaseVolumeTestCase): dst_vol = tests_utils.create_volume(self.context, source_volid=src_vol_id, **self.volume_params) - self.volume.create_volume(self.context, dst_vol.id, volume=dst_vol) + self.volume.create_volume(self.context, dst_vol) self.assertRaises(exception.GlanceMetadataNotFound, db.volume_glance_metadata_copy_from_volume_to_volume, @@ -1810,7 +1788,7 @@ class VolumeTestCase(BaseVolumeTestCase): src_vol = tests_utils.create_volume(self.context, **self.volume_params) src_vol_id = src_vol['id'] - self.volume.create_volume(self.context, src_vol_id, volume=src_vol) + 
self.volume.create_volume(self.context, src_vol) # set bootable flag of volume to True db.volume_update(self.context, src_vol['id'], {'bootable': True}) @@ -1820,7 +1798,7 @@ class VolumeTestCase(BaseVolumeTestCase): **self.volume_params) self._raise_metadata_copy_failure( 'volume_glance_metadata_copy_from_volume_to_volume', - dst_vol.id, volume=dst_vol) + dst_vol) # cleanup resource db.volume_destroy(self.context, src_vol_id) @@ -1833,14 +1811,14 @@ class VolumeTestCase(BaseVolumeTestCase): src_vol = tests_utils.create_volume(self.context, **self.volume_params) src_vol_id = src_vol['id'] - self.volume.create_volume(self.context, src_vol_id, volume=src_vol) + self.volume.create_volume(self.context, src_vol) # set bootable flag of volume to True db.volume_update(self.context, src_vol['id'], {'bootable': True}) # create volume from snapshot snapshot_id = create_snapshot(src_vol['id'])['id'] snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id) - self.volume.create_snapshot(self.context, src_vol['id'], snapshot_obj) + self.volume.create_snapshot(self.context, snapshot_obj) # ensure that status of snapshot is 'available' self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot_obj.status) @@ -1850,7 +1828,7 @@ class VolumeTestCase(BaseVolumeTestCase): **self.volume_params) self._raise_metadata_copy_failure( 'volume_glance_metadata_copy_to_volume', - dst_vol.id, volume=dst_vol) + dst_vol) # cleanup resource snapshot_obj.destroy() @@ -1867,7 +1845,7 @@ class VolumeTestCase(BaseVolumeTestCase): src_vol = tests_utils.create_volume(self.context, **self.volume_params) src_vol_id = src_vol['id'] - self.volume.create_volume(self.context, src_vol_id, volume=src_vol) + self.volume.create_volume(self.context, src_vol) # set bootable flag of volume to True db.volume_update(self.context, src_vol['id'], {'bootable': True}) @@ -1877,7 +1855,7 @@ class VolumeTestCase(BaseVolumeTestCase): **self.volume_params) self._raise_metadata_copy_failure( 'volume_glance_metadata_copy_from_volume_to_volume', - dst_vol.id, volume=dst_vol) + dst_vol) # cleanup resource db.volume_destroy(self.context, src_vol_id) @@ -1890,7 +1868,7 @@ class VolumeTestCase(BaseVolumeTestCase): src_vol = tests_utils.create_volume(self.context, **self.volume_params) src_vol_id = src_vol['id'] - self.volume.create_volume(self.context, src_vol_id, volume=src_vol) + self.volume.create_volume(self.context, src_vol) # set bootable flag of volume to True db.volume_update(self.context, src_vol['id'], {'bootable': True}) @@ -1899,7 +1877,7 @@ class VolumeTestCase(BaseVolumeTestCase): # create snapshot of volume snapshot_id = create_snapshot(volume['id'])['id'] snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id) - self.volume.create_snapshot(self.context, volume['id'], snapshot_obj) + self.volume.create_snapshot(self.context, snapshot_obj) # ensure that status of snapshot is 'available' self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot_obj.status) @@ -1908,7 +1886,7 @@ class VolumeTestCase(BaseVolumeTestCase): dst_vol = tests_utils.create_volume(self.context, snapshot_id=snapshot_id, **self.volume_params) - self.volume.create_volume(self.context, dst_vol.id, volume=dst_vol) + self.volume.create_volume(self.context, dst_vol) self.assertRaises(exception.GlanceMetadataNotFound, db.volume_glance_metadata_copy_to_volume, @@ -1932,17 +1910,15 @@ class VolumeTestCase(BaseVolumeTestCase): volume_src = tests_utils.create_volume(self.context, **self.volume_params) - self.volume.create_volume(self.context, volume_src.id, 
-                                  volume=volume_src)
+        self.volume.create_volume(self.context, volume_src)
         db.volume_update(self.context, volume_src['id'], {'bootable': True})
 
         volume = db.volume_get(self.context, volume_src['id'])
         volume_dst = tests_utils.create_volume(
             self.context,
             **self.volume_params)
-        self.volume.create_volume(self.context, volume_dst.id,
-                                  {'source_replicaid': volume.id},
-                                  volume=volume_dst)
+        self.volume.create_volume(self.context, volume_dst,
+                                  {'source_replicaid': volume.id})
 
         self.assertRaises(exception.GlanceMetadataNotFound,
                           db.volume_glance_metadata_copy_from_volume_to_volume,
@@ -1960,24 +1936,22 @@ class VolumeTestCase(BaseVolumeTestCase):
     def test_create_volume_from_snapshot_delete_lock_taken(self):
         # create source volume
         src_vol = tests_utils.create_volume(self.context,
                                             **self.volume_params)
-        src_vol_id = src_vol['id']
 
         # no lock
-        self.volume.create_volume(self.context, src_vol_id, volume=src_vol)
+        self.volume.create_volume(self.context, src_vol)
 
         # create snapshot
-        snap_id = create_snapshot(src_vol_id,
+        snap_id = create_snapshot(src_vol.id,
                                   size=src_vol['size'])['id']
         snapshot_obj = objects.Snapshot.get_by_id(self.context, snap_id)
 
         # no lock
-        self.volume.create_snapshot(self.context, src_vol_id, snapshot_obj)
+        self.volume.create_snapshot(self.context, snapshot_obj)
 
         # create vol from snapshot...
         dst_vol = tests_utils.create_volume(self.context,
                                             snapshot_id=snap_id,
-                                            source_volid=src_vol_id,
+                                            source_volid=src_vol.id,
                                             **self.volume_params)
-        dst_vol_id = dst_vol['id']
 
         orig_elevated = self.context.elevated
@@ -1989,9 +1963,8 @@ class VolumeTestCase(BaseVolumeTestCase):
             # We expect this to block and then fail
             t = eventlet.spawn(self.volume.create_volume,
                                self.context,
-                               volume_id=dst_vol_id,
-                               request_spec={'snapshot_id': snap_id},
-                               volume=dst_vol)
+                               volume=dst_vol,
+                               request_spec={'snapshot_id': snap_id})
             gthreads.append(t)
 
             return orig_elevated(*args, **kwargs)
@@ -2010,10 +1983,10 @@ class VolumeTestCase(BaseVolumeTestCase):
         with mock.patch('sys.stderr', new=six.StringIO()):
             self.assertRaises(exception.SnapshotNotFound, gthreads[0].wait)
         # locked
-        self.volume.delete_volume(self.context, src_vol_id, volume=src_vol)
+        self.volume.delete_volume(self.context, src_vol)
         # make sure it is gone
         self.assertRaises(exception.VolumeNotFound, db.volume_get,
-                          self.context, src_vol_id)
+                          self.context, src_vol.id)
 
     @mock.patch.object(key_manager, 'API', fake_keymgr.fake_api)
     def test_create_volume_from_snapshot_with_encryption(self):
@@ -2167,12 +2140,10 @@ class VolumeTestCase(BaseVolumeTestCase):
         volume_src = tests_utils.create_volume(self.context,
                                                availability_zone='az2',
                                                **self.volume_params)
-        self.volume.create_volume(self.context, volume_src.id,
-                                  volume=volume_src)
+        self.volume.create_volume(self.context, volume_src)
         snapshot = create_snapshot(volume_src['id'])
-        self.volume.create_snapshot(self.context, volume_src['id'],
-                                    snapshot)
+        self.volume.create_snapshot(self.context, snapshot)
 
         volume_dst = volume_api.create(self.context,
                                        size=1,
@@ -2247,15 +2218,13 @@ class VolumeTestCase(BaseVolumeTestCase):
         # initialize_connection() passes qos_specs that is designated to
         # be consumed by front-end or both front-end and back-end
         conn_info = self.volume.initialize_connection(
-            self.context, fake.VOLUME_ID, connector,
-            volume=fake_volume_obj)
+            self.context, fake_volume_obj, connector)
         self.assertDictMatch(qos_specs_expected,
                              conn_info['data']['qos_specs'])
 
         qos_values.update({'consumer': 'both'})
         conn_info = self.volume.initialize_connection(
-            self.context, fake.VOLUME_ID, connector,
-
volume=fake_volume_obj) + self.context, fake_volume_obj, connector) self.assertDictMatch(qos_specs_expected, conn_info['data']['qos_specs']) # initialize_connection() skips qos_specs that is designated to be @@ -2263,8 +2232,7 @@ class VolumeTestCase(BaseVolumeTestCase): qos_values.update({'consumer': 'back-end'}) type_qos.return_value = dict(qos_specs=qos_values) conn_info = self.volume.initialize_connection( - self.context, fake.VOLUME_ID, connector, - volume=fake_volume_obj) + self.context, fake_volume_obj, connector) self.assertIsNone(conn_info['data']['qos_specs']) @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_export') @@ -2280,8 +2248,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertRaises(exception.VolumeBackendAPIException, self.volume.initialize_connection, - self.context, fake.VOLUME_ID, connector, - volume=volume) + self.context, volume, connector) def test_run_attach_detach_volume_for_instance(self): """Make sure volume can be attached and detached from instance.""" @@ -2292,7 +2259,7 @@ class VolumeTestCase(BaseVolumeTestCase): admin_metadata={'readonly': 'True'}, **self.volume_params) volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id, volume=volume) + self.volume.create_volume(self.context, volume) attachment = self.volume.attach_volume(self.context, volume_id, instance_uuid, None, mountpoint, 'ro') @@ -2312,19 +2279,18 @@ class VolumeTestCase(BaseVolumeTestCase): connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, - volume_id, connector) + volume, connector) self.assertEqual('ro', conn_info['data']['access_mode']) self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, - volume_id, - volume=volume) + volume) self.volume.detach_volume(self.context, volume_id, attachment['id']) vol = db.volume_get(self.context, volume_id) self.assertEqual('available', vol['status']) - self.volume.delete_volume(self.context, volume_id, volume=volume) + self.volume.delete_volume(self.context, volume) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, @@ -2374,7 +2340,7 @@ class VolumeTestCase(BaseVolumeTestCase): multiattach=True, **self.volume_params) volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id, volume=volume) + self.volume.create_volume(self.context, volume) attachment = self.volume.attach_volume(self.context, volume_id, instance_uuid, None, mountpoint, 'ro') @@ -2397,13 +2363,12 @@ class VolumeTestCase(BaseVolumeTestCase): connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, - volume_id, connector) + volume, connector) self.assertEqual('ro', conn_info['data']['access_mode']) self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, - volume_id, - volume=volume) + volume) self.assertRaises(exception.InvalidVolume, self.volume.detach_volume, @@ -2426,7 +2391,7 @@ class VolumeTestCase(BaseVolumeTestCase): vol = db.volume_get(self.context, volume_id) self.assertEqual('available', vol['status']) - self.volume.delete_volume(self.context, volume_id, volume=volume) + self.volume.delete_volume(self.context, volume) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, @@ -2442,7 +2407,7 @@ class VolumeTestCase(BaseVolumeTestCase): multiattach=True, **self.volume_params) volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id, volume=volume) + self.volume.create_volume(self.context, 
volume) attachment = self.volume.attach_volume(self.context, volume_id, instance_uuid, None, mountpoint, 'ro') @@ -2462,7 +2427,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertDictMatch(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, - volume_id, connector) + volume, connector) self.assertEqual('ro', conn_info['data']['access_mode']) instance2_uuid = '12345678-1234-5678-1234-567812345000' @@ -2482,8 +2447,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, - volume_id, - volume=volume) + volume) self.volume.detach_volume(self.context, volume_id, attachment['id']) vol = db.volume_get(self.context, volume_id) self.assertEqual('in-use', vol['status']) @@ -2491,14 +2455,13 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, - volume_id, - volume=volume) + volume) self.volume.detach_volume(self.context, volume_id, attachment2['id']) vol = db.volume_get(self.context, volume_id) self.assertEqual('available', vol['status']) - self.volume.delete_volume(self.context, volume_id, volume=volume) + self.volume.delete_volume(self.context, volume) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, @@ -2514,7 +2477,7 @@ class VolumeTestCase(BaseVolumeTestCase): multiattach=True, **self.volume_params) volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id, volume=volume) + self.volume.create_volume(self.context, volume) attachment = self.volume.attach_volume(self.context, volume_id, instance_uuid, None, mountpoint, 'ro') @@ -2534,7 +2497,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertDictMatch(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, - volume_id, connector) + volume, connector) self.assertEqual('ro', conn_info['data']['access_mode']) mountpoint2 = "/dev/sdx" @@ -2549,8 +2512,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, - volume_id, - volume=volume) + volume) def test_attach_detach_not_multiattach_volume_for_instances(self): """Make sure volume can't be attached to more than one instance.""" @@ -2562,7 +2524,7 @@ class VolumeTestCase(BaseVolumeTestCase): multiattach=False, **self.volume_params) volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id, volume=volume) + self.volume.create_volume(self.context, volume) attachment = self.volume.attach_volume(self.context, volume_id, instance_uuid, None, mountpoint, 'ro') @@ -2582,7 +2544,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertDictMatch(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, - volume_id, connector) + volume, connector) self.assertEqual('ro', conn_info['data']['access_mode']) instance2_uuid = '12345678-1234-5678-1234-567812345000' @@ -2598,13 +2560,12 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, - volume_id, - volume=volume) + volume) self.volume.detach_volume(self.context, volume_id, attachment['id']) vol = db.volume_get(self.context, volume_id) self.assertEqual('available', vol['status']) - self.volume.delete_volume(self.context, volume_id, volume=volume) + 
self.volume.delete_volume(self.context, volume) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, @@ -2618,7 +2579,7 @@ class VolumeTestCase(BaseVolumeTestCase): admin_metadata={'readonly': 'False'}, **self.volume_params) volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id, volume=volume) + self.volume.create_volume(self.context, volume) attachment = self.volume.attach_volume(self.context, volume_id, None, 'fake_host', mountpoint, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) @@ -2638,19 +2599,18 @@ class VolumeTestCase(BaseVolumeTestCase): connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, - volume_id, connector) + volume, connector) self.assertEqual('rw', conn_info['data']['access_mode']) self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, - volume_id, - volume=volume) + volume) self.volume.detach_volume(self.context, volume_id, attachment['id']) vol = db.volume_get(self.context, volume_id) self.assertEqual("available", vol['status']) - self.volume.delete_volume(self.context, volume_id, volume=volume) + self.volume.delete_volume(self.context, volume) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, @@ -2665,7 +2625,7 @@ class VolumeTestCase(BaseVolumeTestCase): multiattach=True, **self.volume_params) volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id, volume=volume) + self.volume.create_volume(self.context, volume) attachment = self.volume.attach_volume(self.context, volume_id, None, 'fake_host', mountpoint, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) @@ -2685,7 +2645,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertDictMatch(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, - volume_id, connector) + volume, connector) self.assertEqual('rw', conn_info['data']['access_mode']) mountpoint2 = "/dev/sdx" @@ -2703,8 +2663,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, - volume_id, - volume=volume) + volume) self.volume.detach_volume(self.context, volume_id, attachment['id']) vol = db.volume_get(self.context, volume_id) self.assertEqual("in-use", vol['status']) @@ -2713,7 +2672,7 @@ class VolumeTestCase(BaseVolumeTestCase): vol = db.volume_get(self.context, volume_id) self.assertEqual("available", vol['status']) - self.volume.delete_volume(self.context, volume_id, volume=volume) + self.volume.delete_volume(self.context, volume) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, @@ -2728,7 +2687,7 @@ class VolumeTestCase(BaseVolumeTestCase): multiattach=True, **self.volume_params) volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id, volume=volume) + self.volume.create_volume(self.context, volume) attachment = self.volume.attach_volume(self.context, volume_id, None, 'fake_host', mountpoint, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) @@ -2748,7 +2707,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertDictMatch(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, - volume_id, connector) + volume, connector) self.assertEqual('rw', conn_info['data']['access_mode']) mountpoint2 = "/dev/sdx" @@ -2762,7 +2721,7 @@ class VolumeTestCase(BaseVolumeTestCase): 
self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, - volume_id, volume=volume) + volume) def test_run_attach_detach_not_multiattach_volume_for_hosts(self): """Make sure volume can't be attached to more than one host.""" @@ -2773,7 +2732,7 @@ class VolumeTestCase(BaseVolumeTestCase): multiattach=False, **self.volume_params) volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id, volume=volume) + self.volume.create_volume(self.context, volume) attachment = self.volume.attach_volume(self.context, volume_id, None, 'fake_host', mountpoint, 'rw') vol = db.volume_get(context.get_admin_context(), volume_id) @@ -2793,7 +2752,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertDictMatch(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, - volume_id, connector) + volume, connector) self.assertEqual('rw', conn_info['data']['access_mode']) mountpoint2 = "/dev/sdx" @@ -2816,13 +2775,12 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertRaises(exception.VolumeAttached, self.volume.delete_volume, self.context, - volume_id, - volume=volume) + volume) self.volume.detach_volume(self.context, volume_id, attachment['id']) vol = db.volume_get(self.context, volume_id) self.assertEqual('available', vol['status']) - self.volume.delete_volume(self.context, volume_id, volume=volume) + self.volume.delete_volume(self.context, volume) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, @@ -2854,7 +2812,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertDictMatch(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, - volume_id, connector) + volume, connector) self.assertEqual('ro', conn_info['data']['access_mode']) @@ -2887,7 +2845,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertDictMatch(expected, ret) connector = {'initiator': 'iqn.2012-07.org.fake:01'} conn_info = self.volume.initialize_connection(self.context, - volume_id, connector) + volume, connector) self.assertEqual('ro', conn_info['data']['access_mode']) self.volume.detach_volume(self.context, volume_id, @@ -2902,7 +2860,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertEqual('readonly', admin_metadata[0]['key']) self.assertEqual('True', admin_metadata[0]['value']) - self.volume.delete_volume(self.context, volume_id, volume=volume) + self.volume.delete_volume(self.context, volume) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, @@ -2916,7 +2874,7 @@ class VolumeTestCase(BaseVolumeTestCase): admin_metadata={'readonly': 'True'}, **self.volume_params) volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id, volume=volume) + self.volume.create_volume(self.context, volume) self.assertRaises(exception.InvalidVolumeAttachMode, self.volume.attach_volume, self.context, @@ -2971,7 +2929,7 @@ class VolumeTestCase(BaseVolumeTestCase): admin_metadata={'readonly': 'True'}, **self.volume_params) volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id, volume=volume) + self.volume.create_volume(self.context, volume) volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolumeAttachMode, volume_api.attach, @@ -3017,7 +2975,7 @@ class VolumeTestCase(BaseVolumeTestCase): admin_metadata={'readonly': 'True'}, **self.volume_params) volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id, volume=volume) + 
self.volume.create_volume(self.context, volume) self.volume.attach_volume(self.context, volume_id, instance_uuid, None, mountpoint, 'ro') # Change volume status to 'uploading' @@ -3085,7 +3043,7 @@ class VolumeTestCase(BaseVolumeTestCase): mock_notify.assert_not_called() - self.volume.create_volume(self.context, volume['id'], volume=volume) + self.volume.create_volume(self.context, volume) self.assert_notify_called(mock_notify, (['INFO', 'volume.create.start'], @@ -3093,7 +3051,7 @@ class VolumeTestCase(BaseVolumeTestCase): snapshot = create_snapshot(volume['id'], size=volume['size']) snapshot_id = snapshot.id - self.volume.create_snapshot(self.context, volume['id'], snapshot) + self.volume.create_snapshot(self.context, snapshot) self.assertEqual( snapshot_id, objects.Snapshot.get_by_id(self.context, snapshot_id).id) @@ -3120,7 +3078,7 @@ class VolumeTestCase(BaseVolumeTestCase): db.snapshot_get, self.context, snapshot_id) - self.volume.delete_volume(self.context, volume.id, volume=volume) + self.volume.delete_volume(self.context, volume) def test_create_delete_snapshot_with_metadata(self): """Test snapshot can be created with metadata and deleted.""" @@ -3241,7 +3199,7 @@ class VolumeTestCase(BaseVolumeTestCase): volume) # clean up - self.volume.delete_volume(self.context, volume.id, volume=volume) + self.volume.delete_volume(self.context, volume) def test_force_delete_volume(self): """Test volume can be forced to delete.""" @@ -3264,7 +3222,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertEqual('deleting', volume.status) # clean up - self.volume.delete_volume(self.context, volume.id, volume=volume) + self.volume.delete_volume(self.context, volume) def test_cannot_force_delete_attached_volume(self): """Test volume can't be force delete in attached state.""" @@ -3283,9 +3241,9 @@ class VolumeTestCase(BaseVolumeTestCase): def test_cannot_delete_volume_with_snapshots(self): """Test volume can't be deleted with dependent snapshots.""" volume = tests_utils.create_volume(self.context, **self.volume_params) - self.volume.create_volume(self.context, volume.id, volume=volume) + self.volume.create_volume(self.context, volume) snapshot = create_snapshot(volume['id'], size=volume['size']) - self.volume.create_snapshot(self.context, volume['id'], snapshot) + self.volume.create_snapshot(self.context, snapshot) self.assertEqual( snapshot.id, objects.Snapshot.get_by_id(self.context, snapshot.id).id) @@ -3300,7 +3258,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.context, volume) self.volume.delete_snapshot(self.context, snapshot) - self.volume.delete_volume(self.context, volume.id, volume=volume) + self.volume.delete_volume(self.context, volume) def test_can_delete_errored_snapshot(self): """Test snapshot can be created and deleted.""" @@ -3313,7 +3271,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.volume_api.delete_snapshot(self.context, snapshot) self.assertEqual(fields.SnapshotStatus.DELETING, snapshot.status) - self.volume.delete_volume(self.context, volume.id) + self.volume.delete_volume(self.context, volume) def test_cannot_delete_snapshot_with_bad_status(self): volume = tests_utils.create_volume(self.context, CONF.host) @@ -3330,7 +3288,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.volume_api.delete_snapshot(self.context, snapshot) self.assertEqual(fields.SnapshotStatus.DELETING, snapshot.status) - self.volume.delete_volume(self.context, volume.id) + self.volume.delete_volume(self.context, volume) def test_create_snapshot_force(self): """Test snapshot in use can be created 
forcibly.""" @@ -3338,7 +3296,7 @@ class VolumeTestCase(BaseVolumeTestCase): instance_uuid = '12345678-1234-5678-1234-567812345678' # create volume and attach to the instance volume = tests_utils.create_volume(self.context, **self.volume_params) - self.volume.create_volume(self.context, volume.id, volume=volume) + self.volume.create_volume(self.context, volume) values = {'volume_id': volume['id'], 'instance_uuid': instance_uuid, 'attach_status': 'attaching', } @@ -3361,7 +3319,7 @@ class VolumeTestCase(BaseVolumeTestCase): # create volume and attach to the host volume = tests_utils.create_volume(self.context, **self.volume_params) - self.volume.create_volume(self.context, volume.id, volume=volume) + self.volume.create_volume(self.context, volume) values = {'volume_id': volume['id'], 'attached_host': 'fake_host', 'attach_status': 'attaching', } @@ -3402,7 +3360,7 @@ class VolumeTestCase(BaseVolumeTestCase): # create snapshot from bootable volume snap = create_snapshot(volume_id) - self.volume.create_snapshot(ctxt, volume_id, snap) + self.volume.create_snapshot(ctxt, snap) # get snapshot's volume_glance_metadata snap_glance_meta = db.volume_snapshot_glance_metadata_get( @@ -3458,7 +3416,6 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertRaises(exception.MetadataCopyFailure, self.volume.create_snapshot, ctxt, - volume_id, snap) # get snapshot's volume_glance_metadata @@ -3478,12 +3435,12 @@ class VolumeTestCase(BaseVolumeTestCase): volume = tests_utils.create_volume(self.context, **self.volume_params) volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id, volume=volume) + self.volume.create_volume(self.context, volume) # set bootable flag of volume to True db.volume_update(self.context, volume_id, {'bootable': True}) snapshot = create_snapshot(volume['id']) - self.volume.create_snapshot(self.context, volume['id'], snapshot) + self.volume.create_snapshot(self.context, snapshot) self.assertRaises(exception.GlanceMetadataNotFound, db.volume_snapshot_glance_metadata_get, self.context, snapshot.id) @@ -3505,9 +3462,9 @@ class VolumeTestCase(BaseVolumeTestCase): volume = tests_utils.create_volume(self.context, **self.volume_params) volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id, volume=volume) + self.volume.create_volume(self.context, volume) snapshot = create_snapshot(volume_id, size=volume['size']) - self.volume.create_snapshot(self.context, volume_id, snapshot) + self.volume.create_snapshot(self.context, snapshot) with mock.patch.object(self.volume.driver, 'delete_snapshot', side_effect=exception.SnapshotIsBusy( @@ -3533,10 +3490,10 @@ class VolumeTestCase(BaseVolumeTestCase): volume = tests_utils.create_volume(self.context, **self.volume_params) volume_id = volume['id'] - self.volume.create_volume(self.context, volume_id, volume=volume) + self.volume.create_volume(self.context, volume) snapshot = create_snapshot(volume_id) snapshot_id = snapshot.id - self.volume.create_snapshot(self.context, volume_id, snapshot) + self.volume.create_snapshot(self.context, snapshot) with mock.patch.object(self.volume.driver, 'delete_snapshot', side_effect=exception.SnapshotIsBusy( @@ -3598,7 +3555,7 @@ class VolumeTestCase(BaseVolumeTestCase): volume = self._create_volume_from_image() self.assertEqual('available', volume['status']) self.assertTrue(volume['bootable']) - self.volume.delete_volume(self.context, volume.id, volume=volume) + self.volume.delete_volume(self.context, volume) @mock.patch('cinder.image.image_utils.qemu_img_info') def 
test_create_volume_from_image_not_cloned_status_available( @@ -3615,7 +3572,7 @@ class VolumeTestCase(BaseVolumeTestCase): volume = self._create_volume_from_image(fakeout_clone_image=True) self.assertEqual('available', volume['status']) self.assertTrue(volume['bootable']) - self.volume.delete_volume(self.context, volume.id, volume=volume) + self.volume.delete_volume(self.context, volume) def test_create_volume_from_image_exception(self): """Test create volume from a non-existing image. @@ -3641,9 +3598,8 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertRaises(exception.ImageNotFound, self.volume.create_volume, self.context, - volume.id, - {'image_id': self.FAKE_UUID}, - volume=volume) + volume, + {'image_id': self.FAKE_UUID}) volume = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual("error", volume['status']) self.assertFalse(volume['bootable']) @@ -3717,7 +3673,7 @@ class VolumeTestCase(BaseVolumeTestCase): volume = self._create_volume_from_image(clone_image_volume=True) self.assertEqual('available', volume['status']) self.assertTrue(volume['bootable']) - self.volume.delete_volume(self.context, volume.id, volume=volume) + self.volume.delete_volume(self.context, volume) def test_create_volume_from_exact_sized_image(self): """Test create volume from an image of the same size. @@ -3998,7 +3954,7 @@ class VolumeTestCase(BaseVolumeTestCase): volume, 3) # clean up - self.volume.delete_volume(self.context, volume.id, volume=volume) + self.volume.delete_volume(self.context, volume) def test_extend_volume_driver_not_initialized(self): """Test extending a volume when the driver is not initialized.""" @@ -4007,21 +3963,21 @@ class VolumeTestCase(BaseVolumeTestCase): volume = tests_utils.create_volume(self.context, size=2, status='available', host=CONF.host) - self.volume.create_volume(self.context, volume.id, volume=volume) + self.volume.create_volume(self.context, volume) self.volume.driver._initialized = False self.assertRaises(exception.DriverNotInitialized, self.volume.extend_volume, - self.context, volume['id'], 3, - fake_reservations, volume=volume) + self.context, volume, 3, + fake_reservations) volume.refresh() self.assertEqual('error_extending', volume.status) # let's clean up the mess.
self.volume.driver._initialized = True - self.volume.delete_volume(self.context, volume.id, volume=volume) + self.volume.delete_volume(self.context, volume) def test_extend_volume_manager(self): """Test volume can be extended at the manager level.""" @@ -4031,7 +3987,7 @@ class VolumeTestCase(BaseVolumeTestCase): fake_reservations = ['RESERVATION'] volume = tests_utils.create_volume(self.context, size=2, status='creating', host=CONF.host) - self.volume.create_volume(self.context, volume.id, volume=volume) + self.volume.create_volume(self.context, volume) # Test driver exception with mock.patch.object(self.volume.driver, @@ -4039,8 +3995,8 @@ class VolumeTestCase(BaseVolumeTestCase): extend_volume.side_effect =\ exception.CinderException('fake exception') volume['status'] = 'extending' - self.volume.extend_volume(self.context, volume['id'], '4', - fake_reservations, volume=volume) + self.volume.extend_volume(self.context, volume, '4', + fake_reservations) volume.refresh() self.assertEqual(2, volume.size) self.assertEqual('error_extending', volume.status) @@ -4051,8 +4007,8 @@ class VolumeTestCase(BaseVolumeTestCase): with mock.patch.object(QUOTAS, 'commit') as quotas_commit: extend_volume.return_value = fake_extend volume.status = 'extending' - self.volume.extend_volume(self.context, volume.id, '4', - fake_reservations, volume=volume) + self.volume.extend_volume(self.context, volume, '4', + fake_reservations) volume.refresh() self.assertEqual(4, volume.size) self.assertEqual('available', volume.status) @@ -4062,7 +4018,7 @@ class VolumeTestCase(BaseVolumeTestCase): project_id=volume.project_id) # clean up - self.volume.delete_volume(self.context, volume.id, volume=volume) + self.volume.delete_volume(self.context, volume) @mock.patch('cinder.volume.rpcapi.VolumeAPI.extend_volume') def test_extend_volume_with_volume_type(self, mock_rpc_extend): @@ -4101,22 +4057,18 @@ class VolumeTestCase(BaseVolumeTestCase): volume_src = tests_utils.create_volume(self.context, **self.volume_params) - self.volume.create_volume(self.context, volume_src.id, - volume=volume_src) + self.volume.create_volume(self.context, volume_src) volume_dst = tests_utils.create_volume( self.context, **self.volume_params) - self.volume.create_volume(self.context, volume_dst.id, - {'source_replicaid': volume_src.id}, - volume=volume_dst) + self.volume.create_volume(self.context, volume_dst, + {'source_replicaid': volume_src.id}) self.assertEqual('available', db.volume_get(context.get_admin_context(), volume_dst['id']).status) self.assertTrue(_create_replica_test.called) - self.volume.delete_volume(self.context, volume_dst.id, - volume=volume_dst) - self.volume.delete_volume(self.context, volume_src.id, - volume=volume_src) + self.volume.delete_volume(self.context, volume_dst) + self.volume.delete_volume(self.context, volume_src) def test_create_volume_from_sourcevol(self): """Test volume can be created from a source volume.""" @@ -4127,19 +4079,15 @@ class VolumeTestCase(BaseVolumeTestCase): fake_create_cloned_volume) volume_src = tests_utils.create_volume(self.context, **self.volume_params) - self.volume.create_volume(self.context, volume_src.id, - volume=volume_src) + self.volume.create_volume(self.context, volume_src) volume_dst = tests_utils.create_volume(self.context, source_volid=volume_src['id'], **self.volume_params) - self.volume.create_volume(self.context, volume_dst.id, - volume=volume_dst) + self.volume.create_volume(self.context, volume_dst) volume_dst.refresh() self.assertEqual('available', volume_dst.status) - 
self.volume.delete_volume(self.context, volume_dst.id, - volume=volume_dst) - self.volume.delete_volume(self.context, volume_src.id, - volume=volume_src) + self.volume.delete_volume(self.context, volume_dst) + self.volume.delete_volume(self.context, volume_src) @@ -4151,8 +4099,7 @@ class VolumeTestCase(BaseVolumeTestCase): volume_src = tests_utils.create_volume(self.context, availability_zone='az2', **self.volume_params) - self.volume.create_volume(self.context, volume_src.id, - volume=volume_src) + self.volume.create_volume(self.context, volume_src) volume_src = db.volume_get(self.context, volume_src['id']) @@ -4186,13 +4133,11 @@ class VolumeTestCase(BaseVolumeTestCase): mock_qemu_info.return_value = image_info volume_src = self._create_volume_from_image() - self.volume.create_volume(self.context, volume_src.id, - volume=volume_src) + self.volume.create_volume(self.context, volume_src) volume_dst = tests_utils.create_volume(self.context, source_volid=volume_src['id'], **self.volume_params) - self.volume.create_volume(self.context, volume_dst.id, - volume=volume_dst) + self.volume.create_volume(self.context, volume_dst) self.assertEqual('available', db.volume_get(context.get_admin_context(), volume_dst['id']).status) @@ -4204,10 +4149,8 @@ class VolumeTestCase(BaseVolumeTestCase): for meta_dst in dst_glancemeta: if meta_dst.key == meta_src.key: self.assertEqual(meta_src.value, meta_dst.value) - self.volume.delete_volume(self.context, volume_src.id, - volume=volume_src) - self.volume.delete_volume(self.context, volume_dst.id, - volume=volume_dst) + self.volume.delete_volume(self.context, volume_src) + self.volume.delete_volume(self.context, volume_dst) def test_create_volume_from_sourcevol_failed_clone(self): """Test src vol status will be restored by error handling code.""" @@ -4220,8 +4163,7 @@ class VolumeTestCase(BaseVolumeTestCase): volume_src = tests_utils.create_volume(self.context, **self.volume_params) self.assertEqual('creating', volume_src.status) - self.volume.create_volume(self.context, volume_src.id, - volume=volume_src) + self.volume.create_volume(self.context, volume_src) self.assertEqual('available', volume_src.status) volume_dst = tests_utils.create_volume(self.context, source_volid=volume_src['id'], @@ -4230,15 +4172,12 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertRaises(exception.CinderException, self.volume.create_volume, self.context, - volume_dst.id, - volume=volume_dst) + volume_dst) # Source volume's status is still available and dst is set to error self.assertEqual('available', volume_src.status) self.assertEqual('error', volume_dst.status) - self.volume.delete_volume(self.context, volume_dst.id, - volume=volume_dst) - self.volume.delete_volume(self.context, volume_src.id, - volume=volume_src) + self.volume.delete_volume(self.context, volume_dst) + self.volume.delete_volume(self.context, volume_src) def test_clean_temporary_volume(self): def fake_delete_volume(ctxt, volume): @@ -4321,7 +4260,7 @@ class VolumeTestCase(BaseVolumeTestCase): volume = tests_utils.create_volume(self.context, admin_metadata={'readonly': 'True'}, **self.volume_params) - self.volume.create_volume(self.context, volume.id, volume=volume) + self.volume.create_volume(self.context, volume) volume.status = 'in-use' def sort_func(obj): @@ -4349,7 +4288,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertEqual('False', admin_metadata[0]['value']) # clean up -
self.volume.delete_volume(self.context, volume.id, volume=volume) + self.volume.delete_volume(self.context, volume) def test_secure_file_operations_enabled(self): """Test secure file operations setting for base driver. @@ -4385,10 +4324,9 @@ class VolumeTestCase(BaseVolumeTestCase): test_vol_id = test_vol['id'] self.assertRaises(exception.DriverNotInitialized, self.volume.create_volume, - self.context, test_vol_id, + self.context, test_vol, {'volume_properties': self.volume_params}, - {'retry': {'num_attempts': 1, 'host': []}}, - volume=test_vol) + {'retry': {'num_attempts': 1, 'host': []}}) self.assertTrue(mock_reschedule.called) volume = db.volume_get(context.get_admin_context(), test_vol_id) self.assertEqual('creating', volume['status']) @@ -4404,19 +4342,18 @@ class VolumeTestCase(BaseVolumeTestCase): volume_id=test_vol_id) self.assertRaises(exception.VolumeNotFound, self.volume.create_volume, - self.context, test_vol_id, + self.context, test_vol, {'volume_properties': self.volume_params}, - {'retry': {'num_attempts': 1, 'host': []}}, - volume=test_vol) + {'retry': {'num_attempts': 1, 'host': []}}) volume = db.volume_get(context.get_admin_context(), test_vol_id) self.assertEqual('error', volume['status']) def test_cascade_delete_volume_with_snapshots(self): """Test volume deletion with dependent snapshots.""" volume = tests_utils.create_volume(self.context, **self.volume_params) - self.volume.create_volume(self.context, volume.id, volume=volume) + self.volume.create_volume(self.context, volume) snapshot = create_snapshot(volume['id'], size=volume['size']) - self.volume.create_snapshot(self.context, volume['id'], snapshot) + self.volume.create_snapshot(self.context, snapshot) self.assertEqual( snapshot.id, objects.Snapshot.get_by_id(self.context, snapshot.id).id) @@ -4433,9 +4370,9 @@ class VolumeTestCase(BaseVolumeTestCase): def test_cascade_delete_volume_with_snapshots_error(self): """Test volume deletion with dependent snapshots.""" volume = tests_utils.create_volume(self.context, **self.volume_params) - self.volume.create_volume(self.context, volume.id, volume=volume) + self.volume.create_volume(self.context, volume) snapshot = create_snapshot(volume['id'], size=volume['size']) - self.volume.create_snapshot(self.context, volume['id'], snapshot) + self.volume.create_snapshot(self.context, snapshot) self.assertEqual( snapshot.id, objects.Snapshot.get_by_id(self.context, snapshot.id).id) @@ -4672,8 +4609,7 @@ class VolumeTestCase(BaseVolumeTestCase): mock_size.return_value = 1 mock_manage.return_value = None - self.volume.manage_existing(self.context, None, 'volume_ref', - test_vol) + self.volume.manage_existing(self.context, test_vol, 'volume_ref') mock_notify.assert_called_with(self.context, test_vol, 'manage_existing.end', host=test_vol.host) @@ -4708,7 +4644,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertRaises(exception.VolumeBackendAPIException, self.volume.manage_existing, - self.context, test_vol.id, + self.context, test_vol, 'volume_ref') # check volume status volume = objects.Volume.get_by_id(context.get_admin_context(), @@ -4717,7 +4653,7 @@ class VolumeTestCase(BaseVolumeTestCase): # Delete this volume with 'error_managing_deleting' status in c-vol. 
test_vol.status = 'error_managing_deleting' test_vol.save() - self.volume.delete_volume(self.context, test_vol.id, volume=test_vol) + self.volume.delete_volume(self.context, test_vol) ctxt = context.get_admin_context(read_deleted='yes') volume = objects.Volume.get_by_id(ctxt, test_vol.id) self.assertEqual('deleted', volume.status) @@ -4754,7 +4690,7 @@ class VolumeTestCase(BaseVolumeTestCase): self.assertRaises(exception.VolumeBackendAPIException, self.volume.manage_existing, - self.context, test_vol.id, + self.context, test_vol, 'volume_ref') # check volume status volume = objects.Volume.get_by_id(context.get_admin_context(), @@ -4763,7 +4699,7 @@ class VolumeTestCase(BaseVolumeTestCase): # Delete this volume with 'error_managing_deleting' status in c-vol. test_vol.status = 'error_managing_deleting' test_vol.save() - self.volume.delete_volume(self.context, test_vol.id, volume=test_vol) + self.volume.delete_volume(self.context, test_vol) ctxt = context.get_admin_context(read_deleted='yes') volume = objects.Volume.get_by_id(ctxt, test_vol.id) self.assertEqual('deleted', volume.status) @@ -4801,8 +4737,7 @@ class VolumeMigrationTestCase(BaseVolumeTestCase): host=CONF.host, migration_status='migrating') host_obj = {'host': 'newhost', 'capabilities': {}} - self.volume.migrate_volume(self.context, volume.id, host_obj, False, - volume=volume) + self.volume.migrate_volume(self.context, volume, host_obj, False) # check volume properties volume = objects.Volume.get_by_id(context.get_admin_context(), @@ -4830,10 +4765,9 @@ class VolumeMigrationTestCase(BaseVolumeTestCase): self.assertRaises(processutils.ProcessExecutionError, self.volume.migrate_volume, self.context, - volume.id, + volume, host_obj, - False, - volume=volume) + False) volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('error', volume.migration_status) @@ -4863,8 +4797,7 @@ class VolumeMigrationTestCase(BaseVolumeTestCase): new_volume_obj, remote='dest') migrate_volume_completion.assert_called_with( - self.context, volume.id, new_volume_obj.id, error=False, - volume=volume, new_volume=new_volume_obj) + self.context, volume, new_volume_obj, error=False) self.assertFalse(update_server_volume.called) @mock.patch('cinder.compute.API') @@ -4913,8 +4846,8 @@ class VolumeMigrationTestCase(BaseVolumeTestCase): mock.patch.object(self.volume.driver, 'delete_volume') as \ delete_volume: create_volume.side_effect = self._fake_create_volume - self.volume.migrate_volume(self.context, fake_volume.id, - host_obj, True, volume=fake_volume) + self.volume.migrate_volume(self.context, fake_volume, host_obj, + True) volume = objects.Volume.get_by_id(context.get_admin_context(), fake_volume.id) self.assertEqual('newhost', volume.host) @@ -4944,10 +4877,9 @@ class VolumeMigrationTestCase(BaseVolumeTestCase): self.assertRaises(processutils.ProcessExecutionError, self.volume.migrate_volume, self.context, - volume.id, + volume, host_obj, - True, - volume=volume) + True) volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('error', volume.migration_status) @@ -4974,8 +4906,8 @@ class VolumeMigrationTestCase(BaseVolumeTestCase): mock_migrate_volume.side_effect = ( lambda x, y, z, new_type_id=None: ( True, {'user_id': fake.USER_ID})) - self.volume.migrate_volume(self.context, volume.id, host_obj, - False, volume=volume) + self.volume.migrate_volume(self.context, volume, host_obj, + False) self.assertEqual('newhost', volume.host) self.assertEqual('success', volume.migration_status) 
self.assertEqual(glance_metadata, volume.glance_metadata) @@ -5047,10 +4979,9 @@ class VolumeMigrationTestCase(BaseVolumeTestCase): self.assertRaises(exception.VolumeMigrationFailed, self.volume.migrate_volume, self.context, - volume.id, + volume, host_obj, - True, - volume=volume) + True) volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('error', volume['migration_status']) @@ -5077,10 +5008,9 @@ class VolumeMigrationTestCase(BaseVolumeTestCase): self.assertRaises(exception.VolumeMigrationFailed, self.volume.migrate_volume, self.context, - volume.id, + volume, host_obj, - True, - volume=volume) + True) volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('error', volume['migration_status']) @@ -5109,19 +5039,17 @@ class VolumeMigrationTestCase(BaseVolumeTestCase): self.assertRaises(processutils.ProcessExecutionError, self.volume.migrate_volume, self.context, - volume.id, + volume, host_obj, - True, - volume=volume) + True) volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('error', volume['migration_status']) self.assertEqual('available', volume['status']) def test_migrate_volume_generic_migrate_volume_completion_error(self): - def fake_migrate_volume_completion(ctxt, volume_id, new_volume_id, - error=False, volume=None, - new_volume=None): + def fake_migrate_volume_completion(ctxt, volume, new_volume, + error=False): db.volume_update(ctxt, volume['id'], {'migration_status': 'completing'}) raise processutils.ProcessExecutionError @@ -5158,10 +5086,9 @@ class VolumeMigrationTestCase(BaseVolumeTestCase): self.assertRaises(processutils.ProcessExecutionError, self.volume.migrate_volume, self.context, - volume.id, + volume, host_obj, - True, - volume=volume) + True) volume = db.volume_get(context.get_admin_context(), volume['id']) self.assertEqual('error', volume['migration_status']) self.assertEqual('available', volume['status']) @@ -5209,10 +5136,8 @@ class VolumeMigrationTestCase(BaseVolumeTestCase): mock_attach_volume.side_effect = self.fake_attach_volume old_volume_host = old_volume.host new_volume_host = new_volume.host - self.volume.migrate_volume_completion(self.context, old_volume.id, - new_volume.id, - volume=old_volume, - new_volume=new_volume) + self.volume.migrate_volume_completion(self.context, old_volume, + new_volume) after_new_volume = objects.Volume.get_by_id(self.context, new_volume.id) after_old_volume = objects.Volume.get_by_id(self.context, @@ -5401,20 +5326,18 @@ class VolumeMigrationTestCase(BaseVolumeTestCase): _mig.return_value = True if not exc: - self.volume.retype(self.context, volume.id, + self.volume.retype(self.context, volume, vol_type['id'], host_obj, migration_policy=policy, reservations=reservations, - old_reservations=old_reservations, - volume=volume) + old_reservations=old_reservations) else: self.assertRaises(exc, self.volume.retype, - self.context, volume.id, + self.context, volume, vol_type['id'], host_obj, migration_policy=policy, reservations=reservations, - old_reservations=old_reservations, - volume=volume) + old_reservations=old_reservations) # get volume/quota properties volume = objects.Volume.get_by_id(elevated, volume.id) @@ -5501,8 +5424,7 @@ class VolumeMigrationTestCase(BaseVolumeTestCase): self.volume.driver._initialized = False self.assertRaises(exception.DriverNotInitialized, self.volume.migrate_volume, - self.context, volume.id, host_obj, True, - volume=volume) + self.context, volume, host_obj, True) volume = 
objects.Volume.get_by_id(context.get_admin_context(), volume.id) @@ -5510,7 +5432,7 @@ class VolumeMigrationTestCase(BaseVolumeTestCase): # let's clean up the mess. self.volume.driver._initialized = True - self.volume.delete_volume(self.context, volume['id'], volume=volume) + self.volume.delete_volume(self.context, volume) def test_delete_source_volume_in_migration(self): """Test deleting a source volume that is in migration.""" @@ -5523,15 +5445,15 @@ class VolumeMigrationTestCase(BaseVolumeTestCase): def _test_delete_volume_in_migration(self, migration_status): """Test deleting a volume that is in migration.""" volume = tests_utils.create_volume(self.context, **self.volume_params) - vol = db.volume_update(self.context, volume.id, - {'status': 'available', - 'migration_status': migration_status}) - self.volume.delete_volume(self.context, vol['id'], volume=volume) + volume.status = 'available' + volume.migration_status = migration_status + volume.save() + self.volume.delete_volume(self.context, volume) # The volume is successfully removed during the volume delete # and won't exist in the database any more. self.assertRaises(exception.VolumeNotFound, db.volume_get, - self.context, vol['id']) + self.context, volume.id) class ReplicationTestCase(BaseVolumeTestCase): @@ -5874,8 +5796,8 @@ class CopyVolumeToImageTestCase(BaseVolumeTestCase): self.volume_attrs['instance_uuid'] = None db.volume_create(self.context, self.volume_attrs) - def fake_create(context, volume_id, **kwargs): - db.volume_update(context, volume_id, {'status': 'available'}) + def fake_create(context, volume, **kwargs): + db.volume_update(context, volume.id, {'status': 'available'}) mock_create.side_effect = fake_create @@ -6105,12 +6027,6 @@ class DriverTestCase(test.TestCase): """Attach volumes to an instance.""" return [] - def _detach_volume(self, volume_id_list): - """Detach volumes from an instance.""" - for volume_id in volume_id_list: - db.volume_detached(self.context, volume_id) - self.volume.delete_volume(self.context, volume_id) - @ddt.ddt class GenericVolumeDriverTestCase(DriverTestCase): @@ -6729,7 +6645,7 @@ class DiscardFlagTestCase(BaseVolumeTestCase): mock_vol.get_by_id.return_value = volume conn_info = self.volume.initialize_connection(self.context, - volume.id, + volume, connector) self.assertEqual(expected_flag, conn_info['data'].get('discard')) diff --git a/cinder/tests/unit/test_volume_rpcapi.py b/cinder/tests/unit/test_volume_rpcapi.py index f2251b0c4bc..f02c49b5f11 100644 --- a/cinder/tests/unit/test_volume_rpcapi.py +++ b/cinder/tests/unit/test_volume_rpcapi.py @@ -16,9 +16,7 @@ Unit Tests for cinder.volume.rpcapi """ import copy -import mock -import ddt from oslo_config import cfg from oslo_serialization import jsonutils @@ -40,7 +38,6 @@ from cinder.volume import utils CONF = cfg.CONF -@ddt.ddt class VolumeRpcAPITestCase(test.TestCase): def setUp(self): @@ -55,9 +52,6 @@ class VolumeRpcAPITestCase(test.TestCase): vol['size'] = 1 volume = db.volume_create(self.context, vol) - self.patch('oslo_messaging.RPCClient.can_send_version', - return_value=True) - kwargs = { 'status': fields.SnapshotStatus.CREATING, 'progress': '0%', @@ -347,8 +341,7 @@ class VolumeRpcAPITestCase(test.TestCase): self._test_volume_api('delete_cgsnapshot', rpc_method='cast', cgsnapshot=self.fake_cgsnap, version='3.0') - @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) - def test_create_volume(self, can_send_version): + def test_create_volume(self): self._test_volume_api('create_volume', rpc_method='cast',
volume=self.fake_volume_obj, @@ -357,21 +350,6 @@ class VolumeRpcAPITestCase(test.TestCase): filter_properties='fake_properties', allow_reschedule=True, version='3.0') - can_send_version.assert_has_calls([mock.call('3.0')]) - - @mock.patch('oslo_messaging.RPCClient.can_send_version', - return_value=False) - def test_create_volume_serialization(self, can_send_version): - request_spec = {"metadata": self.fake_volume_metadata} - self._test_volume_api('create_volume', - rpc_method='cast', - volume=self.fake_volume_obj, - host='fake_host1', - request_spec=request_spec, - filter_properties='fake_properties', - allow_reschedule=True, - version='2.0') - can_send_version.assert_has_calls([mock.call('3.0'), mock.call('2.4')]) def test_delete_volume(self): self._test_volume_api('delete_volume', @@ -448,21 +426,13 @@ class VolumeRpcAPITestCase(test.TestCase): 'disk_format': 'fake_type'}, version='3.0') - @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) - def test_initialize_connection(self, mock_can_send_version): + def test_initialize_connection(self): self._test_volume_api('initialize_connection', rpc_method='call', volume=self.fake_volume_obj, connector='fake_connector', version='3.0') - mock_can_send_version.return_value = False - self._test_volume_api('initialize_connection', - rpc_method='call', - volume=self.fake_volume_obj, - connector='fake_connector', - version='2.0') - def test_terminate_connection(self): self._test_volume_api('terminate_connection', rpc_method='call', @@ -526,19 +496,14 @@ class VolumeRpcAPITestCase(test.TestCase): old_reservations=self.fake_reservations, version='3.0') - @ddt.data('2.0', '2.2', '3.0') - @mock.patch('oslo_messaging.RPCClient.can_send_version') - def test_manage_existing(self, version, can_send_version): - can_send_version.side_effect = lambda x: x == version + def test_manage_existing(self): self._test_volume_api('manage_existing', rpc_method='cast', volume=self.fake_volume_obj, ref={'lv_name': 'foo'}, - version=version) - can_send_version.assert_has_calls([mock.call('3.0')]) + version='3.0') - @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) - def test_manage_existing_snapshot(self, mock_can_send_version): + def test_manage_existing_snapshot(self): volume_update = {'host': 'fake_host'} snpshot = { 'id': fake.SNAPSHOT_ID, diff --git a/cinder/tests/unit/volume/drivers/test_lvm_driver.py b/cinder/tests/unit/volume/drivers/test_lvm_driver.py index 07edbc82e78..e40f3f3619e 100644 --- a/cinder/tests/unit/volume/drivers/test_lvm_driver.py +++ b/cinder/tests/unit/volume/drivers/test_lvm_driver.py @@ -906,7 +906,7 @@ class LVMISCSITestCase(test_volume.DriverTestCase): vol = {} vol['size'] = 0 vol_ref = db.volume_create(self.context, vol) - self.volume.create_volume(self.context, vol_ref['id']) + self.volume.create_volume(self.context, vol_ref) vol_ref = db.volume_get(self.context, vol_ref['id']) # each volume has a different mountpoint diff --git a/cinder/tests/unit/volume/drivers/test_rbd.py b/cinder/tests/unit/volume/drivers/test_rbd.py index d0a182bea9c..298a9ad371e 100644 --- a/cinder/tests/unit/volume/drivers/test_rbd.py +++ b/cinder/tests/unit/volume/drivers/test_rbd.py @@ -1204,17 +1204,14 @@ class ManagedRBDTestCase(test_volume.DriverTestCase): try: if not clone_error: - self.volume.create_volume(self.context, - volume.id, - request_spec={'image_id': image_id}, - volume=volume) + self.volume.create_volume(self.context, volume, + request_spec={'image_id': image_id}) else: 
self.assertRaises(exception.CinderException, self.volume.create_volume, self.context, - volume.id, - request_spec={'image_id': image_id}, - volume=volume) + volume, + request_spec={'image_id': image_id}) volume = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual(expected_status, volume.status) diff --git a/cinder/tests/unit/volume/test_manage_volume.py b/cinder/tests/unit/volume/test_manage_volume.py index 1df5e51b347..0118ead684e 100644 --- a/cinder/tests/unit/volume/test_manage_volume.py +++ b/cinder/tests/unit/volume/test_manage_volume.py @@ -50,19 +50,15 @@ class ManageVolumeTestCase(test_volume.BaseVolumeTestCase): def test_manage_existing(self): volume_object = self._stub_volume_object_get(self) - mock_object_volume = self.mock_object( - objects.Volume, 'get_by_id', mock.Mock(return_value=volume_object)) mock_run_flow_engine = self.mock_object( self.manager, '_run_manage_existing_flow_engine', mock.Mock(return_value=volume_object)) mock_update_volume_stats = self.mock_object( self.manager, '_update_stats_for_managed') - result = self.manager.manage_existing(self.context, volume_object.id) + result = self.manager.manage_existing(self.context, volume_object) self.assertEqual(fake.VOLUME_ID, result) - mock_object_volume.assert_called_once_with(self.context, - volume_object.id) mock_run_flow_engine.assert_called_once_with(self.context, volume_object, None) @@ -78,7 +74,7 @@ class ManageVolumeTestCase(test_volume.BaseVolumeTestCase): self.manager, '_update_stats_for_managed') result = self.manager.manage_existing( - self.context, volume_object.id, volume=volume_object) + self.context, volume_object) self.assertEqual(fake.VOLUME_ID, result) mock_object_volume.assert_not_called() diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py index 48a6f4b17b7..d55317606a8 100644 --- a/cinder/volume/manager.py +++ b/cinder/volume/manager.py @@ -173,7 +173,7 @@ class VolumeManager(manager.SchedulerDependentManager): RPC_API_VERSION = volume_rpcapi.VolumeAPI.RPC_API_VERSION - target = messaging.Target(version='2.6') + target = messaging.Target(version=RPC_API_VERSION) # On cloning a volume, we shouldn't copy volume_type, consistencygroup # and volume_attachment, because the db sets that according to [field]_id, @@ -190,7 +190,6 @@ class VolumeManager(manager.SchedulerDependentManager): # update_service_capabilities needs service_name to be volume super(VolumeManager, self).__init__(service_name='volume', *args, **kwargs) - self.additional_endpoints.append(_VolumeV3Proxy(self)) self.configuration = config.Configuration(volume_manager_opts, config_group=service_name) self.stats = {} @@ -488,13 +487,11 @@ class VolumeManager(manager.SchedulerDependentManager): # Offload all the pending volume delete operations to the # threadpool to prevent the main volume service thread # from being blocked. 
- self._add_to_threadpool(self.delete_volume, ctxt, - volume['id'], volume=volume, + self._add_to_threadpool(self.delete_volume, ctxt, volume, cascade=True) else: # By default, delete volumes sequentially - self.delete_volume(ctxt, volume['id'], volume=volume, - cascade=True) + self.delete_volume(ctxt, volume, cascade=True) LOG.info(_LI("Resume volume delete completed successfully."), resource=volume) @@ -554,24 +551,12 @@ class VolumeManager(manager.SchedulerDependentManager): """ return self.driver.initialized - def create_volume(self, context, volume_id, request_spec=None, - filter_properties=None, allow_reschedule=True, - volume=None): + def create_volume(self, context, volume, request_spec=None, + filter_properties=None, allow_reschedule=True): """Creates the volume.""" # Log about unsupported drivers utils.log_unsupported_driver_warning(self.driver) - # FIXME(dulek): Remove this in v3.0 of RPC API. - if volume is None: - # For older clients, mimic the old behavior and look up the volume - # by its volume_id. - volume = objects.Volume.get_by_id(context, volume_id) - - # FIXME(dulek): Remove this in v3.0 of RPC API. - if isinstance(request_spec, dict): - # We may receive request_spec as dict from older clients. - request_spec = objects.RequestSpec.from_primitives(request_spec) - context_elevated = context.elevated() if filter_properties is None: filter_properties = {} @@ -656,11 +641,8 @@ class VolumeManager(manager.SchedulerDependentManager): LOG.info(_LI("Created volume successfully."), resource=volume) return volume.id - # FIXME(bluex): replace volume_id with volume.id when volume_id is removed - @coordination.synchronized('{volume_id}-{f_name}') - def delete_volume(self, context, volume_id, - unmanage_only=False, - volume=None, + @coordination.synchronized('{volume.id}-{f_name}') + def delete_volume(self, context, volume, unmanage_only=False, cascade=False): """Deletes and unexports volume. @@ -675,15 +657,11 @@ class VolumeManager(manager.SchedulerDependentManager): context = context.elevated() try: - # FIXME(dulek): Remove this in v3.0 of RPC API. - if volume is None: - volume = objects.Volume.get_by_id(context, volume_id) - else: - volume.refresh() + volume.refresh() except exception.VolumeNotFound: # NOTE(thingee): It could be possible for a volume to # be deleted when resuming deletes from init_host(). 
- LOG.debug("Attempted delete of non-existent volume: %s", volume_id) + LOG.debug("Attempted delete of non-existent volume: %s", volume.id) return if context.project_id != volume.project_id: @@ -693,7 +671,7 @@ class VolumeManager(manager.SchedulerDependentManager): if volume['attach_status'] == "attached": # Volume is still attached, need to detach first - raise exception.VolumeAttached(volume_id=volume_id) + raise exception.VolumeAttached(volume_id=volume.id) if vol_utils.extract_host(volume.host) != self.host: raise exception.InvalidVolume( reason=_("volume is not local to this node")) @@ -779,7 +757,7 @@ class VolumeManager(manager.SchedulerDependentManager): resource=volume) # Delete glance metadata if it exists - self.db.volume_glance_metadata_delete_by_volume(context, volume_id) + self.db.volume_glance_metadata_delete_by_volume(context, volume.id) volume.destroy() @@ -823,7 +801,7 @@ class VolumeManager(manager.SchedulerDependentManager): volume_ref.status = status volume_ref.save() - def create_snapshot(self, context, volume_id, snapshot): + def create_snapshot(self, context, snapshot): """Creates and exports the snapshot.""" context = context.elevated() @@ -1175,7 +1153,7 @@ class VolumeManager(manager.SchedulerDependentManager): LOG.warning(_LW('Failed to create new image-volume cache entry.' ' Error: %(exception)s'), {'exception': e}) if image_volume: - self.delete_volume(ctx, image_volume.id) + self.delete_volume(ctx, image_volume) def _clone_image_volume(self, ctx, volume, image_meta): volume_type_id = volume.get('volume_type_id') @@ -1209,8 +1187,7 @@ class VolumeManager(manager.SchedulerDependentManager): project_id=new_vol_values['project_id']) try: - self.create_volume(ctx, image_volume.id, - allow_reschedule=False, volume=image_volume) + self.create_volume(ctx, image_volume, allow_reschedule=False) image_volume = self.db.volume_get(ctx, image_volume.id) if image_volume.status != 'available': raise exception.InvalidVolume(_('Volume is not available.')) @@ -1226,7 +1203,7 @@ class VolumeManager(manager.SchedulerDependentManager): {'volume_id': volume.id, 'image_id': image_meta['id']}) try: - self.delete_volume(ctx, image_volume.id) + self.delete_volume(ctx, image_volume) except exception.CinderException: LOG.exception(_LE('Could not delete the image volume %(id)s.'), {'id': volume.id}) @@ -1351,8 +1328,7 @@ class VolumeManager(manager.SchedulerDependentManager): exc_info=True, resource={'type': 'image', 'id': image_id}) - def initialize_connection(self, context, volume_id, connector, - volume=None): + def initialize_connection(self, context, volume, connector): """Prepare volume for connection from host represented by connector. This method calls the driver initialize_connection and returns @@ -1389,11 +1365,6 @@ class VolumeManager(manager.SchedulerDependentManager): json in various places, so it should not contain any non-json data types. """ - # FIXME(bluex): Remove this in v3.0 of RPC API. - if volume is None: - # For older clients, mimic the old behavior and look up the volume - # by its volume_id. - volume = objects.Volume.get_by_id(context, volume_id) # NOTE(flaper87): Verify the driver is enabled # before going forward. 
The exception will be caught @@ -1592,7 +1563,7 @@ class VolumeManager(manager.SchedulerDependentManager): self.db.volume_update(ctxt, volume['id'], {'status': status}) else: - conn = self.initialize_connection(ctxt, volume['id'], properties) + conn = self.initialize_connection(ctxt, volume, properties) attach_info = self._connect_device(conn) try: @@ -1772,11 +1743,8 @@ class VolumeManager(manager.SchedulerDependentManager): remote='dest') # The above call is synchronous so we complete the migration - self.migrate_volume_completion(ctxt, volume.id, - new_volume.id, - error=False, - volume=volume, - new_volume=new_volume) + self.migrate_volume_completion(ctxt, volume, new_volume, + error=False) else: nova_api = compute.API() # This is an async call to Nova, which will call the completion @@ -1833,15 +1801,7 @@ class VolumeManager(manager.SchedulerDependentManager): "source volume may have been deleted."), {'vol': new_volume.id}) - def migrate_volume_completion(self, ctxt, volume_id, new_volume_id, - error=False, volume=None, new_volume=None): - # FIXME(dulek): Remove this in v3.0 of RPC API. - if volume is None or new_volume is None: - # For older clients, mimic the old behavior and look up the volume - # by its volume_id. - volume = objects.Volume.get_by_id(ctxt, volume_id) - new_volume = objects.Volume.get_by_id(ctxt, new_volume_id) - + def migrate_volume_completion(self, ctxt, volume, new_volume, error=False): try: # NOTE(flaper87): Verify the driver is enabled # before going forward. The exception will be caught @@ -1926,15 +1886,9 @@ class VolumeManager(manager.SchedulerDependentManager): resource=volume) return volume.id - def migrate_volume(self, ctxt, volume_id, host, force_host_copy=False, - new_type_id=None, volume=None): + def migrate_volume(self, ctxt, volume, host, force_host_copy=False, + new_type_id=None): """Migrate the volume to the specified host (called on source host).""" - # FIXME(dulek): Remove this in v3.0 of RPC API. - if volume is None: - # For older clients, mimic the old behavior and look up the volume - # by its volume_id. - volume = objects.Volume.get_by_id(ctxt, volume_id) - try: # NOTE(flaper87): Verify the driver is enabled # before going forward. The exception will be caught @@ -2146,14 +2100,7 @@ class VolumeManager(manager.SchedulerDependentManager): context, snapshot, event_suffix, extra_usage_info=extra_usage_info, host=self.host) - def extend_volume(self, context, volume_id, new_size, reservations, - volume=None): - # FIXME(dulek): Remove this in v3.0 of RPC API. - if volume is None: - # For older clients, mimic the old behavior and look up the volume - # by its volume_id. - volume = objects.Volume.get_by_id(context, volume_id) - + def extend_volume(self, context, volume, new_size, reservations): try: # NOTE(flaper87): Verify the driver is enabled # before going forward. 
The exception will be caught @@ -2204,9 +2151,9 @@ class VolumeManager(manager.SchedulerDependentManager): LOG.info(_LI("Extend volume completed successfully."), resource=volume) - def retype(self, context, volume_id, new_type_id, host, + def retype(self, context, volume, new_type_id, host, migration_policy='never', reservations=None, - volume=None, old_reservations=None): + old_reservations=None): def _retype_error(context, volume, old_reservations, new_reservations, status_update): @@ -2217,12 +2164,6 @@ class VolumeManager(manager.SchedulerDependentManager): QUOTAS.rollback(context, old_reservations) QUOTAS.rollback(context, new_reservations) - # FIXME(dulek): Remove this in v3.0 of RPC API. - if volume is None: - # For older clients, mimic the old behavior and look up the volume - # by its volume_id. - volume = objects.Volume.get_by_id(context, volume_id) - status_update = {'status': volume.previous_status} if context.project_id != volume.project_id: project_id = volume.project_id @@ -2346,7 +2287,7 @@ class VolumeManager(manager.SchedulerDependentManager): volume.save() try: - self.migrate_volume(context, volume.id, host, + self.migrate_volume(context, volume, host, new_type_id=new_type_id) except Exception: with excutils.save_and_reraise_exception(): @@ -2372,13 +2313,7 @@ class VolumeManager(manager.SchedulerDependentManager): LOG.info(_LI("Retype volume completed successfully."), resource=volume) - def manage_existing(self, ctxt, volume_id, ref=None, volume=None): - # FIXME(dulek): Remove this in v3.0 of RPC API. - if volume is None: - # For older clients, mimic the old behavior and look up the volume - # by its volume_id. - volume = objects.Volume.get_by_id(ctxt, volume_id) - + def manage_existing(self, ctxt, volume, ref=None): vol_ref = self._run_manage_existing_flow_engine( ctxt, volume, ref) @@ -4266,191 +4201,3 @@ class VolumeManager(manager.SchedulerDependentManager): def secure_file_operations_enabled(self, ctxt, volume): secure_enabled = self.driver.secure_file_operations_enabled() return secure_enabled - - -# TODO(dulek): This goes away immediately in Ocata and is just present in -# Newton so that we can receive v2.x and v3.0 messages. -class _VolumeV3Proxy(object): - target = messaging.Target(version='3.1') - - def __init__(self, manager): - self.manager = manager - - def create_volume(self, context, volume, request_spec=None, - filter_properties=None, allow_reschedule=True): - # NOTE(dulek): We're replacing volume_id with volume object (by - # switching it from optional keyword argument to positional argument). 
- return self.manager.create_volume( - context, volume.id, request_spec=request_spec, - filter_properties=filter_properties, - allow_reschedule=allow_reschedule, volume=volume) - - def delete_volume(self, context, volume, unmanage_only=False, - cascade=False): - return self.manager.delete_volume( - context, volume.id, unmanage_only=unmanage_only, volume=volume, - cascade=cascade) - - def create_snapshot(self, context, snapshot): - return self.manager.create_snapshot(context, snapshot.volume_id, - snapshot) - - def delete_snapshot(self, context, snapshot, unmanage_only=False): - return self.manager.delete_snapshot( - context, snapshot, unmanage_only=unmanage_only) - - def attach_volume(self, context, volume_id, instance_uuid, host_name, - mountpoint, mode): - return self.manager.attach_volume( - context, volume_id, instance_uuid, host_name, mountpoint, mode) - - def detach_volume(self, context, volume_id, attachment_id=None): - return self.manager.detach_volume(context, volume_id, - attachment_id=attachment_id) - - def copy_volume_to_image(self, context, volume_id, image_meta): - return self.manager.copy_volume_to_image(context, volume_id, - image_meta) - - def initialize_connection(self, context, volume, connector): - # NOTE(dulek): We're replacing volume_id with volume object (by - # switching it from optional keyword argument to positional argument). - return self.manager.initialize_connection(context, volume.id, - connector, volume=volume) - - def terminate_connection(self, context, volume_id, connector, force=False): - return self.manager.terminate_connection(context, volume_id, connector, - force=force) - - def remove_export(self, context, volume_id): - return self.manager.remove_export(context, volume_id) - - def accept_transfer(self, context, volume_id, new_user, new_project): - return self.manager.accept_transfer(context, volume_id, new_user, - new_project) - - def migrate_volume_completion(self, ctxt, volume, new_volume, error=False): - # NOTE(dulek): We're replacing volume_id with volume object, same with - # new_volume_id (by switching them from optional keyword arguments to - # positional arguments). - return self.manager.migrate_volume_completion( - ctxt, volume.id, new_volume.id, error=error, volume=volume, - new_volume=new_volume) - - def migrate_volume(self, ctxt, volume, host, force_host_copy=False, - new_type_id=None): - # NOTE(dulek): We're replacing volume_id with volume object (by - # switching it from optional keyword argument to positional argument). - return self.manager.migrate_volume( - ctxt, volume.id, host, force_host_copy=force_host_copy, - new_type_id=new_type_id, volume=volume) - - def publish_service_capabilities(self, context): - return self.manager.publish_service_capabilities(context) - - def extend_volume(self, context, volume, new_size, reservations): - # NOTE(dulek): We're replacing volume_id with volume object (by - # switching it from optional keyword argument to positional argument). 
- return self.manager.extend_volume( - context, volume.id, new_size, reservations, volume=volume) - - def retype(self, context, volume, new_type_id, host, - migration_policy='never', reservations=None, - old_reservations=None): - return self.manager.retype( - context, volume.id, new_type_id, host, - migration_policy=migration_policy, reservations=reservations, - volume=volume, old_reservations=old_reservations) - - def manage_existing(self, ctxt, volume, ref=None): - return self.manager.manage_existing(ctxt, volume.id, ref=ref, - volume=volume) - - def get_manageable_volumes(self, ctxt, marker, limit, offset, sort_keys, - sort_dirs): - return self.manager.get_manageable_volumes(ctxt, marker, limit, offset, - sort_keys, sort_dirs) - - def promote_replica(self, ctxt, volume_id): - return self.manager.promote_replica(ctxt, volume_id) - - def reenable_replication(self, ctxt, volume_id): - return self.manager.reenable_replication(ctxt, volume_id) - - def create_consistencygroup(self, context, group): - return self.manager.create_consistencygroup(context, group) - - def create_group(self, context, group): - return self.manager.create_group(context, group) - - def create_consistencygroup_from_src(self, context, group, cgsnapshot=None, - source_cg=None): - return self.manager.create_consistencygroup_from_src( - context, group, cgsnapshot=cgsnapshot, source_cg=source_cg) - - def create_group_from_src(self, context, group, group_snapshot=None, - source_group=None): - return self.manager.create_group_from_src( - context, group, group_snapshot=group_snapshot, - source_group=source_group) - - def delete_consistencygroup(self, context, group): - return self.manager.delete_consistencygroup(context, group) - - def delete_group(self, context, group): - return self.manager.delete_group(context, group) - - def update_consistencygroup(self, context, group, add_volumes=None, - remove_volumes=None): - return self.manager.update_consistencygroup( - context, group, add_volumes=add_volumes, - remove_volumes=remove_volumes) - - def update_group(self, context, group, add_volumes=None, - remove_volumes=None): - return self.manager.update_group( - context, group, add_volumes=add_volumes, - remove_volumes=remove_volumes) - - def create_cgsnapshot(self, context, cgsnapshot): - return self.manager.create_cgsnapshot(context, cgsnapshot) - - def create_group_snapshot(self, context, group_snapshot): - return self.manager.create_group_snapshot(context, group_snapshot) - - def delete_cgsnapshot(self, context, cgsnapshot): - return self.manager.delete_cgsnapshot(context, cgsnapshot) - - def delete_group_snapshot(self, context, group_snapshot): - return self.manager.delete_group_snapshot(context, group_snapshot) - - def update_migrated_volume(self, ctxt, volume, new_volume, volume_status): - return self.manager.update_migrated_volume(ctxt, volume, new_volume, - volume_status) - - def failover_host(self, context, secondary_backend_id=None): - return self.manager.failover_host( - context, secondary_backend_id=secondary_backend_id) - - def freeze_host(self, context): - return self.manager.freeze_host(context) - - def thaw_host(self, context): - return self.manager.thaw_host(context) - - def manage_existing_snapshot(self, ctxt, snapshot, ref=None): - return self.manager.manage_existing_snapshot(ctxt, snapshot, ref=ref) - - def get_manageable_snapshots(self, ctxt, marker, limit, offset, sort_keys, - sort_dirs): - return self.manager.get_manageable_snapshots( - ctxt, marker, limit, offset, sort_keys, sort_dirs) - - def 
get_capabilities(self, context, discover): - return self.manager.get_capabilities(context, discover) - - def get_backup_device(self, ctxt, backup): - return self.manager.get_backup_device(ctxt, backup) - - def secure_file_operations_enabled(self, ctxt, volume): - return self.manager.secure_file_operations_enabled(ctxt, volume) diff --git a/cinder/volume/rpcapi.py b/cinder/volume/rpcapi.py index 81e064f169d..0448fc7c4a4 100644 --- a/cinder/volume/rpcapi.py +++ b/cinder/volume/rpcapi.py @@ -13,10 +13,7 @@ # under the License. -from oslo_serialization import jsonutils - from cinder.common import constants -from cinder import objects from cinder import quota from cinder import rpc from cinder.volume import utils @@ -124,20 +121,20 @@ class VolumeAPI(rpc.RPCAPI): return self.client.prepare(server=new_host, version=version) def create_consistencygroup(self, ctxt, group, host): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(host, version) cctxt.cast(ctxt, 'create_consistencygroup', group=group) def delete_consistencygroup(self, ctxt, group): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(group.host, version) cctxt.cast(ctxt, 'delete_consistencygroup', group=group) def update_consistencygroup(self, ctxt, group, add_volumes=None, remove_volumes=None): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(group.host, version) cctxt.cast(ctxt, 'update_consistencygroup', group=group, @@ -146,7 +143,7 @@ class VolumeAPI(rpc.RPCAPI): def create_consistencygroup_from_src(self, ctxt, group, cgsnapshot=None, source_cg=None): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(group.host, version) cctxt.cast(ctxt, 'create_consistencygroup_from_src', group=group, @@ -154,12 +151,12 @@ class VolumeAPI(rpc.RPCAPI): source_cg=source_cg) def create_cgsnapshot(self, ctxt, cgsnapshot): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(cgsnapshot.consistencygroup.host, version) cctxt.cast(ctxt, 'create_cgsnapshot', cgsnapshot=cgsnapshot) def delete_cgsnapshot(self, ctxt, cgsnapshot): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(cgsnapshot.consistencygroup.host, version) cctxt.cast(ctxt, 'delete_cgsnapshot', cgsnapshot=cgsnapshot) @@ -170,50 +167,35 @@ class VolumeAPI(rpc.RPCAPI): 'allow_reschedule': allow_reschedule, 'volume': volume, } - version = self._compat_ver('3.0', '2.4', '2.0') - if version in ('2.4', '2.0'): - msg_args['volume_id'] = volume.id - if version == '2.0': - # Send request_spec as dict - msg_args['request_spec'] = jsonutils.to_primitive(request_spec) + version = '3.0' cctxt = self._get_cctxt(host, version) cctxt.cast(ctxt, 'create_volume', **msg_args) def delete_volume(self, ctxt, volume, unmanage_only=False, cascade=False): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(volume.host, version) msg_args = { 'volume': volume, 'unmanage_only': unmanage_only, 'cascade': cascade, } - if version == '2.0': - msg_args['volume_id'] = volume.id - cctxt.cast(ctxt, 'delete_volume', **msg_args) def create_snapshot(self, ctxt, volume, snapshot): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(volume['host'], version) - msg_args = { - 'snapshot': snapshot, - } - - if version == '2.0': - msg_args['volume_id'] = volume['id'] - - cctxt.cast(ctxt, 'create_snapshot', **msg_args) + cctxt.cast(ctxt, 'create_snapshot', 
snapshot=snapshot) def delete_snapshot(self, ctxt, snapshot, host, unmanage_only=False): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(host, version) cctxt.cast(ctxt, 'delete_snapshot', snapshot=snapshot, unmanage_only=unmanage_only) def attach_volume(self, ctxt, volume, instance_uuid, host_name, mountpoint, mode): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(volume['host'], version) return cctxt.call(ctxt, 'attach_volume', volume_id=volume['id'], @@ -223,68 +205,59 @@ class VolumeAPI(rpc.RPCAPI): mode=mode) def detach_volume(self, ctxt, volume, attachment_id): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(volume['host'], version) return cctxt.call(ctxt, 'detach_volume', volume_id=volume['id'], attachment_id=attachment_id) def copy_volume_to_image(self, ctxt, volume, image_meta): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(volume['host'], version) cctxt.cast(ctxt, 'copy_volume_to_image', volume_id=volume['id'], image_meta=image_meta) def initialize_connection(self, ctxt, volume, connector): - version = self._compat_ver('3.0', '2.3', '2.0') + version = '3.0' msg_args = {'connector': connector, 'volume': volume} - if version in ('2.0', '2.3'): - msg_args['volume_id'] = volume.id - if version == '2.0': - del msg_args['volume'] - cctxt = self._get_cctxt(volume['host'], version=version) return cctxt.call(ctxt, 'initialize_connection', **msg_args) def terminate_connection(self, ctxt, volume, connector, force=False): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(volume['host'], version) return cctxt.call(ctxt, 'terminate_connection', volume_id=volume['id'], connector=connector, force=force) def remove_export(self, ctxt, volume): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(volume['host'], version) cctxt.cast(ctxt, 'remove_export', volume_id=volume['id']) def publish_service_capabilities(self, ctxt): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self.client.prepare(fanout=True, version=version) cctxt.cast(ctxt, 'publish_service_capabilities') def accept_transfer(self, ctxt, volume, new_user, new_project): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(volume['host'], version) return cctxt.call(ctxt, 'accept_transfer', volume_id=volume['id'], new_user=new_user, new_project=new_project) def extend_volume(self, ctxt, volume, new_size, reservations): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(volume.host, version) msg_args = { 'volume': volume, 'new_size': new_size, 'reservations': reservations, } - - if version == '2.0': - msg_args['volume_id'] = volume.id - cctxt.cast(ctxt, 'extend_volume', **msg_args) def migrate_volume(self, ctxt, volume, dest_host, force_host_copy): host_p = {'host': dest_host.host, 'capabilities': dest_host.capabilities} - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(volume.host, version) msg_args = { @@ -292,23 +265,16 @@ class VolumeAPI(rpc.RPCAPI): 'force_host_copy': force_host_copy, } - if version == '2.0': - msg_args['volume_id'] = volume.id - cctxt.cast(ctxt, 'migrate_volume', **msg_args) def migrate_volume_completion(self, ctxt, volume, new_volume, error): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(volume.host, version) msg_args = { 'volume': volume, 
'new_volume': new_volume, 'error': error, } - if version == '2.0': - msg_args['volume_id'] = volume.id - msg_args['new_volume_id'] = new_volume.id - return cctxt.call(ctxt, 'migrate_volume_completion', **msg_args) def retype(self, ctxt, volume, new_type_id, dest_host, @@ -316,7 +282,7 @@ class VolumeAPI(rpc.RPCAPI): old_reservations=None): host_p = {'host': dest_host.host, 'capabilities': dest_host.capabilities} - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(volume.host, version) msg_args = { @@ -325,26 +291,19 @@ class VolumeAPI(rpc.RPCAPI): 'old_reservations': old_reservations, } - if version == '2.0': - msg_args['volume_id'] = volume.id - cctxt.cast(ctxt, 'retype', **msg_args) def manage_existing(self, ctxt, volume, ref): msg_args = { 'ref': ref, 'volume': volume, } - version = self._compat_ver('3.0', '2.2', '2.0') - if version in ('2.2', '2.0'): - msg_args['volume_id'] = volume.id - if version == '2.0': - msg_args.pop('volume') + version = '3.0' cctxt = self._get_cctxt(volume.host, version) cctxt.cast(ctxt, 'manage_existing', **msg_args) def update_migrated_volume(self, ctxt, volume, new_volume, original_volume_status): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(new_volume['host'], version) cctxt.call(ctxt, 'update_migrated_volume', @@ -354,61 +313,50 @@ class VolumeAPI(rpc.RPCAPI): def freeze_host(self, ctxt, host): """Set backend host to frozen.""" - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(host, version) return cctxt.call(ctxt, 'freeze_host') def thaw_host(self, ctxt, host): """Clear the frozen setting on a backend host.""" - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(host, version) return cctxt.call(ctxt, 'thaw_host') def failover_host(self, ctxt, host, secondary_backend_id=None): """Failover host to the specified backend_id (secondary). """ - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(host, version) cctxt.cast(ctxt, 'failover_host', secondary_backend_id=secondary_backend_id) def manage_existing_snapshot(self, ctxt, snapshot, ref, host): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(host, version) cctxt.cast(ctxt, 'manage_existing_snapshot', snapshot=snapshot, ref=ref) def get_capabilities(self, ctxt, host, discover): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(host, version) return cctxt.call(ctxt, 'get_capabilities', discover=discover) def get_backup_device(self, ctxt, backup, volume): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(volume.host, version) backup_dict = cctxt.call(ctxt, 'get_backup_device', backup=backup) - - # FIXME(dulek): Snippet below converts received raw dicts to o.vo. This - # is only for a case when Mitaka's c-vol will answer us with volume - # dict instead of an o.vo and should go away in early Ocata. 
- if isinstance(backup_dict.get('backup_device'), dict): - is_snapshot = backup_dict.get('is_snapshot') - obj_class = objects.Snapshot if is_snapshot else objects.Volume - obj = obj_class() - obj_class._from_db_object(ctxt, obj, backup_dict['backup_device']) - backup_dict['backup_device'] = obj - return backup_dict def secure_file_operations_enabled(self, ctxt, volume): - version = self._compat_ver('3.0', '2.0') + version = '3.0' cctxt = self._get_cctxt(volume.host, version) return cctxt.call(ctxt, 'secure_file_operations_enabled', volume=volume) def get_manageable_volumes(self, ctxt, host, marker, limit, offset, sort_keys, sort_dirs): - version = self._compat_ver('3.0', '2.1') + version = '3.0' cctxt = self._get_cctxt(host, version) return cctxt.call(ctxt, 'get_manageable_volumes', marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, @@ -416,27 +364,27 @@ class VolumeAPI(rpc.RPCAPI): def get_manageable_snapshots(self, ctxt, host, marker, limit, offset, sort_keys, sort_dirs): - version = self._compat_ver('3.0', '2.1') + version = '3.0' cctxt = self._get_cctxt(host, version) return cctxt.call(ctxt, 'get_manageable_snapshots', marker=marker, limit=limit, offset=offset, sort_keys=sort_keys, sort_dirs=sort_dirs) def create_group(self, ctxt, group, host): - version = self._compat_ver('3.0', '2.5') + version = '3.0' cctxt = self._get_cctxt(host, version) cctxt.cast(ctxt, 'create_group', group=group) def delete_group(self, ctxt, group): - version = self._compat_ver('3.0', '2.5') + version = '3.0' cctxt = self._get_cctxt(group.host, version) cctxt.cast(ctxt, 'delete_group', group=group) def update_group(self, ctxt, group, add_volumes=None, remove_volumes=None): - version = self._compat_ver('3.0', '2.5') + version = '3.0' cctxt = self._get_cctxt(group.host, version) cctxt.cast(ctxt, 'update_group', group=group, @@ -445,7 +393,7 @@ class VolumeAPI(rpc.RPCAPI): def create_group_from_src(self, ctxt, group, group_snapshot=None, source_group=None): - version = self._compat_ver('3.0', '2.6') + version = '3.0' cctxt = self._get_cctxt(group.host, version) cctxt.cast(ctxt, 'create_group_from_src', group=group, @@ -453,13 +401,13 @@ class VolumeAPI(rpc.RPCAPI): source_group=source_group) def create_group_snapshot(self, ctxt, group_snapshot): - version = self._compat_ver('3.0', '2.6') + version = '3.0' cctxt = self._get_cctxt(group_snapshot.group.host, version) cctxt.cast(ctxt, 'create_group_snapshot', group_snapshot=group_snapshot) def delete_group_snapshot(self, ctxt, group_snapshot): - version = self._compat_ver('3.0', '2.6') + version = '3.0' cctxt = self._get_cctxt(group_snapshot.group.host, version) cctxt.cast(ctxt, 'delete_group_snapshot', group_snapshot=group_snapshot)
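
Every rpcapi.py hunk above converges on the same shape: pin the RPC version to '3.0', build the client context, and send the Volume versioned object whole instead of a bare volume_id. A minimal sketch of that post-patch client pattern follows, for orientation only; fake_operation is a hypothetical stand-in rather than a method from this patch, and the class assumes cinder's rpc.RPCAPI base and _get_cctxt() helper exactly as used in the hunks above:

    from cinder import rpc

    class VolumeAPI(rpc.RPCAPI):
        """Illustrative sketch of the 3.0-only client pattern; not part
        of the patch itself."""

        RPC_API_VERSION = '3.0'

        def fake_operation(self, ctxt, volume):
            # No _compat_ver() candidate list remains -- '3.0' is the only
            # version this client negotiates once Mitaka compat is gone.
            cctxt = self._get_cctxt(volume.host, '3.0')
            # The Volume object travels whole; the 2.0-era volume_id
            # fallback keyword no longer exists on the manager side.
            cctxt.cast(ctxt, 'fake_operation', volume=volume)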