Merge "Update create_volume API to use versionedobjects"
commit 3b385f8f27
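The diff below follows the usual RPC version-negotiation pattern for passing versioned objects: the scheduler RPC API is bumped to 1.9 (the volume RPC API tests move to 1.32), the client attaches the Volume object only when the remote side can accept the new version, and the scheduler manager falls back to loading the volume by id for older callers. The following standalone sketch only illustrates that pattern; FakeRpcClient and the two helper functions are simplified, hypothetical stand-ins, not the real Cinder or oslo.messaging classes.

# Minimal sketch of the compatibility pattern (illustrative names only):

class FakeRpcClient(object):
    """Stand-in for an oslo.messaging RPC client (hypothetical)."""

    def __init__(self, remote_version):
        self.remote_version = remote_version

    def can_send_version(self, version):
        # The real client compares against a negotiated/pinned version cap.
        return self.remote_version >= version

    def cast(self, method, **msg_args):
        print('cast %s with %s' % (method, sorted(msg_args)))


def scheduler_create_volume(client, volume_id, volume, request_spec=None):
    # Client side, mirroring SchedulerAPI.create_volume() in the diff:
    # send the object only when the server understands version 1.9.
    msg_args = {'volume_id': volume_id, 'request_spec': request_spec}
    if client.can_send_version('1.9'):
        version = '1.9'
        msg_args['volume'] = volume
    else:
        version = '1.2'
    print('casting with RPC version %s' % version)
    client.cast('create_volume', **msg_args)


def manager_create_volume(context, volume_id, volume=None,
                          load_by_id=lambda ctxt, vid: {'id': vid}):
    # Server side, mirroring SchedulerManager.create_volume(): older
    # clients never pass the object, so fetch it by id as a fallback.
    if volume is None:
        volume = load_by_id(context, volume_id)
    return volume


if __name__ == '__main__':
    scheduler_create_volume(FakeRpcClient('1.9'), 'vol-1', {'id': 'vol-1'})
    scheduler_create_volume(FakeRpcClient('1.2'), 'vol-1', {'id': 'vol-1'})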
@@ -398,11 +398,6 @@ class VolumeController(wsgi.Controller):
volume.get('display_description'),
**kwargs)

# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
new_volume = dict(new_volume)

retval = _translate_volume_detail_view(context, new_volume, image_uuid)

return {'volume': retval}

@@ -418,10 +418,6 @@ class VolumeController(wsgi.Controller):
volume.get('display_description'),
**kwargs)

# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
new_volume = dict(new_volume)
retval = self._view_builder.detail(req, new_volume)

return retval

@@ -23,8 +23,8 @@ from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import timeutils

from cinder import db
from cinder.i18n import _
from cinder import objects
from cinder.volume import rpcapi as volume_rpcapi

@@ -46,8 +46,14 @@ def volume_update_db(context, volume_id, host):

:returns: A Volume with the updated fields set properly.
"""
values = {'host': host, 'scheduled_at': timeutils.utcnow()}
return db.volume_update(context, volume_id, values)
volume = objects.Volume.get_by_id(context, volume_id)
volume.host = host
volume.scheduled_at = timeutils.utcnow()
volume.save()

# A volume object is expected to be returned, as it is used by
# filter_scheduler.
return volume


def group_update_db(context, group, host):
@@ -17,7 +17,7 @@ from taskflow.patterns import linear_flow

from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE
from cinder.i18n import _LE
from cinder import rpc
from cinder import utils
from cinder.volume.flows import common

@@ -40,39 +40,33 @@ class ExtractSchedulerSpecTask(flow_utils.CinderTask):
**kwargs)
self.db_api = db_api

def _populate_request_spec(self, context, volume_id, snapshot_id,
def _populate_request_spec(self, context, volume, snapshot_id,
image_id):
# Create the full request spec using the volume_id.
# Create the full request spec using the volume object.
#
# NOTE(harlowja): this will fetch the volume from the database, if
# the volume has been deleted before we got here then this should fail.
#
# In the future we might want to have a lock on the volume_id so that
# the volume can not be deleted while its still being created?
if not volume_id:
raise exception.InvalidInput(
reason=_("No volume_id provided to populate a "
"request_spec from"))
volume_ref = self.db_api.volume_get(context, volume_id)
volume_type_id = volume_ref.get('volume_type_id')
vol_type = self.db_api.volume_type_get(context, volume_type_id)
# NOTE(dulek): At this point, a volume can be deleted before it gets
# scheduled. If a delete API call is made, the volume gets instantly
# delete and scheduling will fail when it tries to update the DB entry
# (with the host) in ScheduleCreateVolumeTask below.
volume_type_id = volume.volume_type_id
vol_type = volume.volume_type
return {
'volume_id': volume_id,
'volume_id': volume.id,
'snapshot_id': snapshot_id,
'image_id': image_id,
'volume_properties': {
'size': utils.as_int(volume_ref.get('size'), quiet=False),
'availability_zone': volume_ref.get('availability_zone'),
'size': utils.as_int(volume.size, quiet=False),
'availability_zone': volume.availability_zone,
'volume_type_id': volume_type_id,
},
'volume_type': list(dict(vol_type).items()),
}

def execute(self, context, request_spec, volume_id, snapshot_id,
def execute(self, context, request_spec, volume, snapshot_id,
image_id):
# For RPC version < 1.2 backward compatibility
if request_spec is None:
request_spec = self._populate_request_spec(context, volume_id,
request_spec = self._populate_request_spec(context, volume.id,
snapshot_id, image_id)
return {
'request_spec': request_spec,

@@ -143,7 +137,7 @@ class ScheduleCreateVolumeTask(flow_utils.CinderTask):

def get_flow(context, db_api, driver_api, request_spec=None,
filter_properties=None,
volume_id=None, snapshot_id=None, image_id=None):
volume=None, snapshot_id=None, image_id=None):

"""Constructs and returns the scheduler entrypoint flow.

@@ -158,7 +152,7 @@ def get_flow(context, db_api, driver_api, request_spec=None,
'context': context,
'raw_request_spec': request_spec,
'filter_properties': filter_properties,
'volume_id': volume_id,
'volume': volume,
'snapshot_id': snapshot_id,
'image_id': image_id,
}
@@ -33,6 +33,7 @@ from cinder import exception
from cinder import flow_utils
from cinder.i18n import _, _LE
from cinder import manager
from cinder import objects
from cinder import quota
from cinder import rpc
from cinder.scheduler.flows import create_volume

@@ -55,7 +56,7 @@ LOG = logging.getLogger(__name__)
class SchedulerManager(manager.Manager):
"""Chooses a host to create volumes."""

RPC_API_VERSION = '1.8'
RPC_API_VERSION = '1.9'

target = messaging.Target(version=RPC_API_VERSION)

@@ -116,15 +117,22 @@ class SchedulerManager(manager.Manager):

def create_volume(self, context, topic, volume_id, snapshot_id=None,
image_id=None, request_spec=None,
filter_properties=None):
filter_properties=None, volume=None):

self._wait_for_scheduler()

# FIXME(thangp): Remove this in v2.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the
# volume by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)

try:
flow_engine = create_volume.get_flow(context,
db, self.driver,
request_spec,
filter_properties,
volume_id,
volume,
snapshot_id,
image_id)
except Exception:
@@ -42,6 +42,7 @@ class SchedulerAPI(object):
1.6 - Add create_consistencygroup method
1.7 - Add get_active_pools method
1.8 - Add sending object over RPC in create_consistencygroup method
1.9 - Adds support for sending objects over RPC in create_volume()
"""

RPC_API_VERSION = '1.0'

@@ -51,7 +52,10 @@ class SchedulerAPI(object):
target = messaging.Target(topic=CONF.scheduler_topic,
version=self.RPC_API_VERSION)
serializer = objects_base.CinderObjectSerializer()
self.client = rpc.get_client(target, version_cap='1.8',

# NOTE(thangp): Until version pinning is impletemented, set the client
# version_cap to None
self.client = rpc.get_client(target, version_cap=None,
serializer=serializer)

def create_consistencygroup(self, ctxt, topic, group,

@@ -72,17 +76,21 @@ class SchedulerAPI(object):

def create_volume(self, ctxt, topic, volume_id, snapshot_id=None,
image_id=None, request_spec=None,
filter_properties=None):
filter_properties=None, volume=None):

cctxt = self.client.prepare(version='1.2')
request_spec_p = jsonutils.to_primitive(request_spec)
return cctxt.cast(ctxt, 'create_volume',
topic=topic,
volume_id=volume_id,
snapshot_id=snapshot_id,
image_id=image_id,
request_spec=request_spec_p,
filter_properties=filter_properties)
msg_args = {'topic': topic, 'volume_id': volume_id,
'snapshot_id': snapshot_id, 'image_id': image_id,
'request_spec': request_spec_p,
'filter_properties': filter_properties}
if self.client.can_send_version('1.9'):
version = '1.9'
msg_args['volume'] = volume
else:
version = '1.2'

cctxt = self.client.prepare(version=version)
return cctxt.cast(ctxt, 'create_volume', **msg_args)

def migrate_volume_to_host(self, ctxt, topic, volume_id, host,
force_host_copy=False, request_spec=None,
@@ -87,7 +87,25 @@ class SchedulerRpcAPITestCase(test.TestCase):
capabilities='fake_capabilities',
fanout=True)

def test_create_volume(self):
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=True)
def test_create_volume(self, can_send_version):
self._test_scheduler_api('create_volume',
rpc_method='cast',
topic='topic',
volume_id='volume_id',
snapshot_id='snapshot_id',
image_id='image_id',
request_spec='fake_request_spec',
filter_properties='filter_properties',
volume='volume',
version='1.9')
can_send_version.assert_called_once_with('1.9')

@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=False)
def test_create_volume_old(self, can_send_version):
# Tests backwards compatibility with older clients
self._test_scheduler_api('create_volume',
rpc_method='cast',
topic='topic',

@@ -97,6 +115,7 @@ class SchedulerRpcAPITestCase(test.TestCase):
request_spec='fake_request_spec',
filter_properties='filter_properties',
version='1.2')
can_send_version.assert_called_once_with('1.9')

def test_migrate_volume_to_host(self):
self._test_scheduler_api('migrate_volume_to_host',
@@ -28,6 +28,7 @@ from cinder.scheduler import filter_scheduler
from cinder.scheduler import manager
from cinder import test
from cinder.tests.unit import fake_consistencygroup
from cinder.tests.unit import fake_volume
from cinder.tests.unit import utils as tests_utils

CONF = cfg.CONF

@@ -100,15 +101,16 @@ class SchedulerManagerTestCase(test.TestCase):
# Test NoValidHost exception behavior for create_volume.
# Puts the volume in 'error' state and eats the exception.
_mock_sched_create.side_effect = exception.NoValidHost(reason="")
fake_volume_id = 1
volume = fake_volume.fake_volume_obj(self.context)
topic = 'fake_topic'
request_spec = {'volume_id': fake_volume_id}
request_spec = {'volume_id': volume.id}

self.manager.create_volume(self.context, topic, fake_volume_id,
self.manager.create_volume(self.context, topic, volume.id,
request_spec=request_spec,
filter_properties={})
filter_properties={},
volume=volume)
_mock_volume_update.assert_called_once_with(self.context,
fake_volume_id,
volume.id,
{'status': 'error'})
_mock_sched_create.assert_called_once_with(self.context, request_spec,
{})

@@ -116,14 +118,15 @@ class SchedulerManagerTestCase(test.TestCase):
@mock.patch('cinder.scheduler.driver.Scheduler.schedule_create_volume')
@mock.patch('eventlet.sleep')
def test_create_volume_no_delay(self, _mock_sleep, _mock_sched_create):
fake_volume_id = 1
volume = fake_volume.fake_volume_obj(self.context)
topic = 'fake_topic'

request_spec = {'volume_id': fake_volume_id}
request_spec = {'volume_id': volume.id}

self.manager.create_volume(self.context, topic, fake_volume_id,
self.manager.create_volume(self.context, topic, volume.id,
request_spec=request_spec,
filter_properties={})
filter_properties={},
volume=volume)
_mock_sched_create.assert_called_once_with(self.context, request_spec,
{})
self.assertFalse(_mock_sleep.called)

@@ -135,16 +138,17 @@ class SchedulerManagerTestCase(test.TestCase):
_mock_is_ready,
_mock_sched_create):
self.manager._startup_delay = True
fake_volume_id = 1
volume = fake_volume.fake_volume_obj(self.context)
topic = 'fake_topic'

request_spec = {'volume_id': fake_volume_id}
request_spec = {'volume_id': volume.id}

_mock_is_ready.side_effect = [False, False, True]

self.manager.create_volume(self.context, topic, fake_volume_id,
self.manager.create_volume(self.context, topic, volume.id,
request_spec=request_spec,
filter_properties={})
filter_properties={},
volume=volume)
_mock_sched_create.assert_called_once_with(self.context, request_spec,
{})
calls = [mock.call(1)] * 2

@@ -158,16 +162,17 @@ class SchedulerManagerTestCase(test.TestCase):
_mock_is_ready,
_mock_sched_create):
self.manager._startup_delay = True
fake_volume_id = 1
volume = fake_volume.fake_volume_obj(self.context)
topic = 'fake_topic'

request_spec = {'volume_id': fake_volume_id}
request_spec = {'volume_id': volume.id}

_mock_is_ready.return_value = True

self.manager.create_volume(self.context, topic, fake_volume_id,
self.manager.create_volume(self.context, topic, volume.id,
request_spec=request_spec,
filter_properties={})
filter_properties={},
volume=volume)
_mock_sched_create.assert_called_once_with(self.context, request_spec,
{})
self.assertFalse(_mock_sleep.called)
@@ -346,10 +351,13 @@ class SchedulerDriverModuleTestCase(test.TestCase):
self.context = context.RequestContext('fake_user', 'fake_project')

@mock.patch('cinder.db.volume_update')
@mock.patch('oslo_utils.timeutils.utcnow')
def test_volume_host_update_db(self, _mock_utcnow, _mock_vol_update):
_mock_utcnow.return_value = 'fake-now'
driver.volume_update_db(self.context, 31337, 'fake_host')
_mock_vol_update.assert_called_once_with(self.context, 31337,
{'host': 'fake_host',
'scheduled_at': 'fake-now'})
@mock.patch('cinder.objects.volume.Volume.get_by_id')
def test_volume_host_update_db(self, _mock_volume_get, _mock_vol_update):
volume = fake_volume.fake_volume_obj(self.context)
_mock_volume_get.return_value = volume

driver.volume_update_db(self.context, volume.id, 'fake_host')
scheduled_at = volume.scheduled_at.replace(tzinfo=None)
_mock_vol_update.assert_called_once_with(
self.context, volume.id, {'host': 'fake_host',
'scheduled_at': scheduled_at})
@@ -21,13 +21,12 @@ import os
import tempfile

import mock
from oslo_utils import timeutils
from oslo_utils import units

from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import objects
from cinder import test
from cinder.tests.unit.image import fake as fake_image
from cinder.tests.unit import test_volume

@@ -1090,7 +1089,6 @@ class ManagedRBDTestCase(test_volume.DriverTestCase):
NOTE: if clone_error is True we force the image type to raw otherwise
clone_image is not called
"""
volume_id = 1

# See tests.image.fake for image types.
if raw:

@@ -1099,32 +1097,34 @@ class ManagedRBDTestCase(test_volume.DriverTestCase):
image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'

# creating volume testdata
db.volume_create(self.context,
{'id': volume_id,
'updated_at': timeutils.utcnow(),
'display_description': 'Test Desc',
'size': 20,
'status': 'creating',
'instance_uuid': None,
'host': 'dummy'})
db_volume = {'display_description': 'Test Desc',
'size': 20,
'status': 'creating',
'availability_zone': 'fake_zone',
'attach_status': 'detached',
'host': 'dummy'}
volume = objects.Volume(context=self.context, **db_volume)
volume.create()

try:
if not clone_error:
self.volume.create_volume(self.context,
volume_id,
request_spec={'image_id': image_id})
volume.id,
request_spec={'image_id': image_id},
volume=volume)
else:
self.assertRaises(exception.CinderException,
self.volume.create_volume,
self.context,
volume_id,
request_spec={'image_id': image_id})
volume.id,
request_spec={'image_id': image_id},
volume=volume)

volume = db.volume_get(self.context, volume_id)
self.assertEqual(expected_status, volume['status'])
volume = objects.Volume.get_by_id(self.context, volume.id)
self.assertEqual(expected_status, volume.status)
finally:
# cleanup
db.volume_destroy(self.context, volume_id)
volume.destroy()

def test_create_vol_from_image_status_available(self):
"""Clone raw image then verify volume is in available state."""
@@ -3024,16 +3024,12 @@ class StorwizeSVCDriverTestCase(test.TestCase):
@mock.patch.object(storwize_svc_common.StorwizeHelpers, 'rename_vdisk')
def test_storwize_update_migrated_volume(self, rename_vdisk):
ctxt = testutils.get_test_admin_context()
current_volume_id = 'fake_volume_id'
original_volume_id = 'fake_original_volume_id'
current_name = 'volume-' + current_volume_id
original_name = 'volume-' + original_volume_id
backend_volume = self._create_volume(id=current_volume_id)
volume = self._create_volume(id=original_volume_id)
backend_volume = self._create_volume()
volume = self._create_volume()
model_update = self.driver.update_migrated_volume(ctxt, volume,
backend_volume,
'available')
rename_vdisk.assert_called_once_with(current_name, original_name)
rename_vdisk.assert_called_once_with(backend_volume.name, volume.name)
self.assertEqual({'_name_id': None}, model_update)

rename_vdisk.reset_mock()

@@ -3041,14 +3037,14 @@ class StorwizeSVCDriverTestCase(test.TestCase):
model_update = self.driver.update_migrated_volume(ctxt, volume,
backend_volume,
'available')
self.assertEqual({'_name_id': current_volume_id}, model_update)
self.assertEqual({'_name_id': backend_volume.id}, model_update)

rename_vdisk.reset_mock()
rename_vdisk.side_effect = exception.VolumeBackendAPIException
model_update = self.driver.update_migrated_volume(ctxt, volume,
backend_volume,
'attached')
self.assertEqual({'_name_id': current_volume_id}, model_update)
self.assertEqual({'_name_id': backend_volume.id}, model_update)

def test_storwize_vdisk_copy_ops(self):
ctxt = testutils.get_test_admin_context()
@@ -54,6 +54,7 @@ from cinder.tests.unit.brick import fake_lvm
from cinder.tests.unit import conf_fixture
from cinder.tests.unit import fake_driver
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit.image import fake as fake_image
from cinder.tests.unit.keymgr import fake as fake_keymgr
from cinder.tests.unit import utils as tests_utils

@@ -516,17 +517,16 @@ class VolumeTestCase(BaseVolumeTestCase):
availability_zone=CONF.storage_availability_zone,
**self.volume_params)

volume_id = volume['id']
self.assertIsNone(volume['encryption_key_id'])
self.assertEqual(0, len(self.notifier.notifications),
self.notifier.notifications)
self.assertRaises(exception.DriverNotInitialized,
self.volume.delete_volume,
self.context, volume_id)
self.context, volume.id)

volume = db.volume_get(context.get_admin_context(), volume_id)
volume = objects.Volume.get_by_id(self.context, volume.id)
self.assertEqual("error_deleting", volume.status)
db.volume_destroy(context.get_admin_context(), volume_id)
volume.destroy()

@mock.patch('cinder.quota.QUOTAS.rollback', new=mock.Mock())
@mock.patch('cinder.quota.QUOTAS.commit', new=mock.Mock())

@@ -562,7 +562,7 @@ class VolumeTestCase(BaseVolumeTestCase):
'replication_status': 'disabled',
'replication_extended_status': None,
'replication_driver_data': None,
'metadata': [],
'metadata': None,
'volume_attachment': [],
}
self.assertDictMatch(expected, msg['payload'])

@@ -580,6 +580,7 @@ class VolumeTestCase(BaseVolumeTestCase):
self.assertEqual(4, len(self.notifier.notifications),
self.notifier.notifications)
msg = self.notifier.notifications[2]
expected['metadata'] = []
self.assertEqual('volume.delete.start', msg['event_type'])
self.assertDictMatch(expected, msg['payload'])
msg = self.notifier.notifications[3]

@@ -597,9 +598,7 @@ class VolumeTestCase(BaseVolumeTestCase):
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
result_meta = {
volume.volume_metadata[0].key: volume.volume_metadata[0].value}
self.assertEqual(test_meta, result_meta)
self.assertEqual(test_meta, volume.metadata)

self.volume.delete_volume(self.context, volume_id)
self.assertRaises(exception.NotFound,

@@ -629,8 +628,7 @@ class VolumeTestCase(BaseVolumeTestCase):
FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type')
volume = tests_utils.create_volume(self.context, metadata=test_meta1,
**self.volume_params)
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
self.volume.create_volume(self.context, volume.id, volume=volume)

volume_api = cinder.volume.api.API()
@@ -1558,7 +1556,6 @@ class VolumeTestCase(BaseVolumeTestCase):
dst_vol = tests_utils.create_volume(self.context,
source_volid=src_vol_id,
**self.volume_params)
dst_vol_id = dst_vol['id']

orig_elevated = self.context.elevated

@@ -1571,7 +1568,7 @@ class VolumeTestCase(BaseVolumeTestCase):
# we expect this to block and then fail
t = eventlet.spawn(self.volume.create_volume,
self.context,
volume_id=dst_vol_id,
volume_id=dst_vol.id,
request_spec={'source_volid': src_vol_id})
gthreads.append(t)

@@ -1747,8 +1744,7 @@ class VolumeTestCase(BaseVolumeTestCase):
dst_vol = tests_utils.create_volume(self.context,
snapshot_id=snapshot_id,
**self.volume_params)
self.volume.create_volume(self.context,
dst_vol['id'])
self.volume.create_volume(self.context, dst_vol.id, volume=dst_vol)

self.assertRaises(exception.GlanceMetadataNotFound,
db.volume_glance_metadata_copy_to_volume,

@@ -3548,8 +3544,7 @@ class VolumeTestCase(BaseVolumeTestCase):
spec=tests_utils.get_file_spec())

image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
volume_id = tests_utils.create_volume(self.context,
**self.volume_params)['id']
volume = tests_utils.create_volume(self.context, **self.volume_params)
# creating volume testdata
try:
request_spec = {

@@ -3557,12 +3552,13 @@ class VolumeTestCase(BaseVolumeTestCase):
'image_id': image_id,
}
self.volume.create_volume(self.context,
volume_id,
request_spec)
volume.id,
request_spec,
volume=volume)
finally:
# cleanup
os.unlink(dst_path)
volume = db.volume_get(self.context, volume_id)
volume = objects.Volume.get_by_id(self.context, volume.id)

return volume

@@ -3600,25 +3596,25 @@ class VolumeTestCase(BaseVolumeTestCase):
self.stubs.Set(self.volume.driver, 'local_path', lambda x: dst_path)

# creating volume testdata
volume_id = 1
db.volume_create(self.context,
{'id': volume_id,
'updated_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'display_description': 'Test Desc',
'size': 20,
'status': 'creating',
'host': 'dummy'})
kwargs = {'display_description': 'Test Desc',
'size': 20,
'availability_zone': 'fake_availability_zone',
'status': 'creating',
'attach_status': 'detached',
'host': 'dummy'}
volume = objects.Volume(context=self.context, **kwargs)
volume.create()

self.assertRaises(exception.ImageNotFound,
self.volume.create_volume,
self.context,
volume_id,
volume.id,
{'image_id': self.FAKE_UUID})
volume = db.volume_get(self.context, volume_id)
volume = objects.Volume.get_by_id(self.context, volume.id)
self.assertEqual("error", volume['status'])
self.assertFalse(volume['bootable'])
# cleanup
db.volume_destroy(self.context, volume_id)
volume.destroy()
os.unlink(dst_path)

def test_create_volume_from_image_copy_exception_rescheduling(self):
@@ -4389,20 +4385,20 @@ class VolumeMigrationTestCase(VolumeTestCase):
nova_api):
attached_host = 'some-host'
fake_volume_id = 'fake_volume_id'
fake_new_volume = {'status': 'available', 'id': fake_volume_id}
fake_db_new_volume = {'status': 'available', 'id': fake_volume_id}
fake_new_volume = fake_volume.fake_db_volume(**fake_db_new_volume)
host_obj = {'host': 'newhost', 'capabilities': {}}
fake_uuid = fakes.get_fake_uuid()
update_server_volume = nova_api.return_value.update_server_volume
volume_get.return_value = fake_new_volume
volume = tests_utils.create_volume(self.context, size=1,
host=CONF.host)
volume = tests_utils.attach_volume(self.context, volume['id'],
fake_uuid, attached_host,
'/dev/vda')
self.assertIsNotNone(volume['volume_attachment'][0]['id'])
self.assertEqual(fake_uuid,
volume['volume_attachment'][0]['instance_uuid'])
self.assertEqual('in-use', volume['status'])
volume_attach = tests_utils.attach_volume(
self.context, volume['id'], fake_uuid, attached_host, '/dev/vda')
self.assertIsNotNone(volume_attach['volume_attachment'][0]['id'])
self.assertEqual(
fake_uuid, volume_attach['volume_attachment'][0]['instance_uuid'])
self.assertEqual('in-use', volume_attach['status'])
self.volume._migrate_volume_generic(self.context, volume,
host_obj, None)
self.assertFalse(migrate_volume_completion.called)

@@ -5118,8 +5114,7 @@ class ConsistencyGroupTestCase(BaseVolumeTestCase):
consistencygroup_id=group2.id,
snapshot_id=snapshot_id,
**self.volume_params)
volume2_id = volume2['id']
self.volume.create_volume(self.context, volume2_id)
self.volume.create_volume(self.context, volume2.id, volume=volume2)
self.volume.create_consistencygroup_from_src(
self.context, group2, cgsnapshot=cgsnapshot)
cg2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id)

@@ -5186,8 +5181,7 @@ class ConsistencyGroupTestCase(BaseVolumeTestCase):
consistencygroup_id=group3.id,
source_volid=volume_id,
**self.volume_params)
volume3_id = volume3['id']
self.volume.create_volume(self.context, volume3_id)
self.volume.create_volume(self.context, volume3.id, volume=volume3)
self.volume.create_consistencygroup_from_src(
self.context, group3, source_cg=group)

@@ -5444,8 +5438,7 @@ class ConsistencyGroupTestCase(BaseVolumeTestCase):
status='creating',
size=1)
self.volume.host = 'host1@backend1'
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
self.volume.create_volume(self.context, volume.id, volume=volume)

self.volume.delete_consistencygroup(self.context, group)
cg = objects.ConsistencyGroup.get_by_id(

@@ -5480,8 +5473,7 @@ class ConsistencyGroupTestCase(BaseVolumeTestCase):
status='creating',
size=1)
self.volume.host = 'host1@backend2'
volume_id = volume['id']
self.volume.create_volume(self.context, volume_id)
self.volume.create_volume(self.context, volume.id, volume=volume)

self.assertRaises(exception.InvalidVolume,
self.volume.delete_consistencygroup,
@@ -17,6 +17,7 @@ Unit Tests for cinder.volume.rpcapi
"""
import copy

import mock
from oslo_config import cfg
from oslo_serialization import jsonutils

@@ -84,6 +85,7 @@ class VolumeRpcAPITestCase(test.TestCase):
group2 = objects.ConsistencyGroup.get_by_id(self.context, group2.id)
cgsnapshot = objects.CGSnapshot.get_by_id(self.context, cgsnapshot.id)
self.fake_volume = jsonutils.to_primitive(volume)
self.fake_volume_obj = fake_volume.fake_volume_obj(self.context, **vol)
self.fake_volume_metadata = volume["volume_metadata"]
self.fake_snapshot = snapshot
self.fake_reservations = ["RESERVATION"]

@@ -117,8 +119,13 @@ class VolumeRpcAPITestCase(test.TestCase):
expected_msg = copy.deepcopy(kwargs)
if 'volume' in expected_msg:
volume = expected_msg['volume']
# NOTE(thangp): copy.deepcopy() is making oslo_versionedobjects
# think that 'metadata' was changed.
if isinstance(volume, objects.Volume):
volume.obj_reset_changes()
del expected_msg['volume']
expected_msg['volume_id'] = volume['id']
expected_msg['volume'] = volume
if 'snapshot' in expected_msg:
snapshot = expected_msg['snapshot']
del expected_msg['snapshot']

@@ -194,6 +201,10 @@ class VolumeRpcAPITestCase(test.TestCase):
expected_cgsnapshot = expected_msg[kwarg].obj_to_primitive()
cgsnapshot = value.obj_to_primitive()
self.assertEqual(expected_cgsnapshot, cgsnapshot)
elif isinstance(value, objects.Volume):
expected_volume = expected_msg[kwarg].obj_to_primitive()
volume = value.obj_to_primitive()
self.assertEqual(expected_volume, volume)
else:
self.assertEqual(expected_msg[kwarg], value)

@@ -219,26 +230,46 @@ class VolumeRpcAPITestCase(test.TestCase):
self._test_volume_api('delete_cgsnapshot', rpc_method='cast',
cgsnapshot=self.fake_cgsnap, version='1.31')

def test_create_volume(self):
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=True)
def test_create_volume(self, can_send_version):
self._test_volume_api('create_volume',
rpc_method='cast',
volume=self.fake_volume,
volume=self.fake_volume_obj,
host='fake_host1',
request_spec='fake_request_spec',
filter_properties='fake_properties',
allow_reschedule=True,
version='1.32')
can_send_version.assert_called_once_with('1.32')

@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=False)
def test_create_volume_old(self, can_send_version):
# Tests backwards compatibility with older clients
self._test_volume_api('create_volume',
rpc_method='cast',
volume=self.fake_volume_obj,
host='fake_host1',
request_spec='fake_request_spec',
filter_properties='fake_properties',
allow_reschedule=True,
version='1.24')
can_send_version.assert_called_once_with('1.32')

def test_create_volume_serialization(self):
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=True)
def test_create_volume_serialization(self, can_send_version):
request_spec = {"metadata": self.fake_volume_metadata}
self._test_volume_api('create_volume',
rpc_method='cast',
volume=self.fake_volume,
volume=self.fake_volume_obj,
host='fake_host1',
request_spec=request_spec,
filter_properties='fake_properties',
allow_reschedule=True,
version='1.24')
version='1.32')
can_send_version.assert_called_once_with('1.32')

def test_delete_volume(self):
self._test_volume_api('delete_volume',
@@ -17,8 +17,8 @@ import datetime
import mock

from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder import test
from cinder.tests.unit import utils
from cinder.transfer import api as transfer_api

@@ -35,10 +35,9 @@ class VolumeTransferTestCase(test.TestCase):
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
def test_transfer_volume_create_delete(self, mock_notify):
tx_api = transfer_api.API()
utils.create_volume(self.ctxt, id='1',
updated_at=self.updated_at)
response = tx_api.create(self.ctxt, '1', 'Description')
volume = db.volume_get(self.ctxt, '1')
volume = utils.create_volume(self.ctxt, updated_at=self.updated_at)
response = tx_api.create(self.ctxt, volume.id, 'Description')
volume = objects.Volume.get_by_id(self.ctxt, volume.id)
self.assertEqual('awaiting-transfer', volume['status'],
'Unexpected state')
calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"),

@@ -47,7 +46,7 @@ class VolumeTransferTestCase(test.TestCase):
self.assertEqual(2, mock_notify.call_count)

tx_api.delete(self.ctxt, response['id'])
volume = db.volume_get(self.ctxt, '1')
volume = objects.Volume.get_by_id(self.ctxt, volume.id)
self.assertEqual('available', volume['status'], 'Unexpected state')
calls = [mock.call(self.ctxt, mock.ANY, "transfer.delete.start"),
mock.call(self.ctxt, mock.ANY, "transfer.delete.end")]

@@ -56,22 +55,21 @@ class VolumeTransferTestCase(test.TestCase):

def test_transfer_invalid_volume(self):
tx_api = transfer_api.API()
utils.create_volume(self.ctxt, id='1', status='in-use',
updated_at=self.updated_at)
volume = utils.create_volume(self.ctxt, status='in-use',
updated_at=self.updated_at)
self.assertRaises(exception.InvalidVolume,
tx_api.create,
self.ctxt, '1', 'Description')
volume = db.volume_get(self.ctxt, '1')
self.ctxt, volume.id, 'Description')
volume = objects.Volume.get_by_id(self.ctxt, volume.id)
self.assertEqual('in-use', volume['status'], 'Unexpected state')

@mock.patch('cinder.volume.utils.notify_about_volume_usage')
def test_transfer_accept(self, mock_notify):
svc = self.start_service('volume', host='test_host')
tx_api = transfer_api.API()
utils.create_volume(self.ctxt, id='1',
updated_at=self.updated_at)
transfer = tx_api.create(self.ctxt, '1', 'Description')
volume = db.volume_get(self.ctxt, '1')
volume = utils.create_volume(self.ctxt, updated_at=self.updated_at)
transfer = tx_api.create(self.ctxt, volume.id, 'Description')
volume = objects.Volume.get_by_id(self.ctxt, volume.id)
self.assertEqual('awaiting-transfer', volume['status'],
'Unexpected state')

@@ -88,11 +86,13 @@ class VolumeTransferTestCase(test.TestCase):
mock_notify.assert_has_calls(calls)
self.assertEqual(2, mock_notify.call_count)

db.volume_update(self.ctxt, '1', {'status': 'wrong'})
volume.status = 'wrong'
volume.save()
self.assertRaises(exception.InvalidVolume,
tx_api.accept,
self.ctxt, transfer['id'], transfer['auth_key'])
db.volume_update(self.ctxt, '1', {'status': 'awaiting-transfer'})
volume.status = 'awaiting-transfer'
volume.save()

# Because the InvalidVolume exception is raised in tx_api, so there is
# only transfer.accept.start called and missing transfer.accept.end.

@@ -105,15 +105,13 @@ class VolumeTransferTestCase(test.TestCase):
response = tx_api.accept(self.ctxt,
transfer['id'],
transfer['auth_key'])
volume = db.volume_get(self.ctxt, '1')
self.assertEqual('new_project_id', volume['project_id'],
'Unexpected project id')
self.assertEqual('new_user_id', volume['user_id'],
'Unexpected user id')
volume = objects.Volume.get_by_id(self.ctxt, volume.id)
self.assertEqual('new_project_id', volume.project_id)
self.assertEqual('new_user_id', volume.user_id)

self.assertEqual(volume['id'], response['volume_id'],
self.assertEqual(response['volume_id'], volume.id,
'Unexpected volume id in response.')
self.assertEqual(transfer['id'], response['id'],
self.assertEqual(response['id'], transfer['id'],
'Unexpected transfer id in response.')

calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start"),

@@ -125,8 +123,7 @@ class VolumeTransferTestCase(test.TestCase):

def test_transfer_get(self):
tx_api = transfer_api.API()
volume = utils.create_volume(self.ctxt, id='1',
updated_at=self.updated_at)
volume = utils.create_volume(self.ctxt, updated_at=self.updated_at)
transfer = tx_api.create(self.ctxt, volume['id'], 'Description')
t = tx_api.get(self.ctxt, transfer['id'])
self.assertEqual(t['id'], transfer['id'], 'Unexpected transfer id')

@@ -136,7 +133,7 @@ class VolumeTransferTestCase(test.TestCase):

nctxt = context.RequestContext(user_id='new_user_id',
project_id='new_project_id')
utils.create_volume(nctxt, id='2', updated_at=self.updated_at)
utils.create_volume(nctxt, updated_at=self.updated_at)
self.assertRaises(exception.TransferNotFound,
tx_api.get,
nctxt,

@@ -148,8 +145,7 @@ class VolumeTransferTestCase(test.TestCase):
@mock.patch('cinder.volume.utils.notify_about_volume_usage')
def test_delete_transfer_with_deleted_volume(self, mock_notify):
# create a volume
volume = utils.create_volume(self.ctxt, id='1',
updated_at=self.updated_at)
volume = utils.create_volume(self.ctxt, updated_at=self.updated_at)
# create a transfer
tx_api = transfer_api.API()
transfer = tx_api.create(self.ctxt, volume['id'], 'Description')

@@ -161,7 +157,7 @@ class VolumeTransferTestCase(test.TestCase):
mock_notify.assert_has_calls(calls)
self.assertEqual(2, mock_notify.call_count)
# force delete volume
db.volume_destroy(context.get_admin_context(), volume['id'])
volume.destroy()
# Make sure transfer has been deleted.
self.assertRaises(exception.TransferNotFound,
tx_api.get,
@@ -52,7 +52,8 @@ def create_volume(ctxt,
vol['user_id'] = ctxt.user_id
vol['project_id'] = ctxt.project_id
vol['status'] = status
vol['migration_status'] = migration_status
if migration_status:
vol['migration_status'] = migration_status
vol['display_name'] = display_name
vol['display_description'] = display_description
vol['attach_status'] = 'detached'

@@ -64,11 +65,16 @@ def create_volume(ctxt,
for key in kwargs:
vol[key] = kwargs[key]
vol['replication_status'] = replication_status
vol['replication_extended_status'] = replication_extended_status
vol['replication_driver_data'] = replication_driver_data
vol['previous_status'] = previous_status
if replication_extended_status:
vol['replication_extended_status'] = replication_extended_status
if replication_driver_data:
vol['replication_driver_data'] = replication_driver_data
if previous_status:
vol['previous_status'] = previous_status

return db.volume_create(ctxt, vol)
volume = objects.Volume(ctxt, **vol)
volume.create()
return volume


def attach_volume(ctxt, volume_id, instance_uuid, attached_host,
@@ -36,9 +36,9 @@ class FakeSchedulerRpcAPI(object):
self.expected_spec = expected_spec
self.test_inst = test_inst

def create_volume(self, ctxt, volume, volume_ref, snapshot_id=None,
def create_volume(self, ctxt, topic, volume_id, snapshot_id=None,
image_id=None, request_spec=None,
filter_properties=None):
filter_properties=None, volume=None):

self.test_inst.assertEqual(self.expected_spec, request_spec)

@@ -50,17 +50,21 @@ class CreateVolumeFlowTestCase(test.TestCase):
# called to avoid div by zero errors.
self.counter = float(0)

@mock.patch('cinder.objects.Volume.get_by_id')
@mock.patch('cinder.volume.utils.extract_host')
@mock.patch('time.time', side_effect=time_inc)
@mock.patch('cinder.objects.ConsistencyGroup.get_by_id')
def test_cast_create_volume(self, consistencygroup_get_by_id, mock_time,
mock_extract_host):
mock_extract_host, volume_get_by_id):
volume = fake_volume.fake_volume_obj(self.ctxt)
volume_get_by_id.return_value = volume
props = {}
cg_obj = (fake_consistencygroup.
fake_consistencyobject_obj(self.ctxt, consistencygroup_id=1,
host='host@backend#pool'))
consistencygroup_get_by_id.return_value = cg_obj
spec = {'volume_id': None,
'volume': None,
'source_volid': None,
'snapshot_id': None,
'image_id': None,

@@ -76,7 +80,8 @@ class CreateVolumeFlowTestCase(test.TestCase):

task._cast_create_volume(self.ctxt, spec, props)

spec = {'volume_id': 1,
spec = {'volume_id': volume.id,
'volume': volume,
'source_volid': 2,
'snapshot_id': 3,
'image_id': 4,

@@ -346,26 +351,26 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
@mock.patch('cinder.volume.flows.manager.create_volume.'
'CreateVolumeFromSpecTask.'
'_handle_bootable_volume_glance_meta')
@mock.patch('cinder.objects.Volume.get_by_id')
@mock.patch('cinder.objects.Snapshot.get_by_id')
def test_create_from_snapshot(self, snapshot_get_by_id, handle_bootable):
def test_create_from_snapshot(self, snapshot_get_by_id, volume_get_by_id,
handle_bootable):
fake_db = mock.MagicMock()
fake_driver = mock.MagicMock()
fake_volume_manager = mock.MagicMock()
fake_manager = create_volume_manager.CreateVolumeFromSpecTask(
fake_volume_manager, fake_db, fake_driver)
volume = fake_volume.fake_db_volume()
orig_volume_db = mock.MagicMock(id=10, bootable=True)
volume_db = {'bootable': True}
volume_obj = fake_volume.fake_volume_obj(self.ctxt, **volume_db)
snapshot_obj = fake_snapshot.fake_snapshot_obj(self.ctxt)
snapshot_get_by_id.return_value = snapshot_obj
fake_db.volume_get.return_value = orig_volume_db
volume_get_by_id.return_value = volume_obj

fake_manager._create_from_snapshot(self.ctxt, volume,
fake_manager._create_from_snapshot(self.ctxt, volume_obj,
snapshot_obj.id)
fake_driver.create_volume_from_snapshot.assert_called_once_with(
volume, snapshot_obj)
fake_db.volume_get.assert_called_once_with(self.ctxt,
snapshot_obj.volume_id)
handle_bootable.assert_called_once_with(self.ctxt, volume['id'],
volume_obj, snapshot_obj)
handle_bootable.assert_called_once_with(self.ctxt, volume_obj.id,
snapshot_id=snapshot_obj.id)

@mock.patch('cinder.objects.Snapshot.get_by_id')

@@ -620,11 +625,13 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):
image_meta=image_meta
)

@mock.patch('cinder.db.volume_update')
@mock.patch('cinder.objects.Volume.get_by_id')
@mock.patch('cinder.image.image_utils.qemu_img_info')
def test_create_from_image_cache_miss(
self, mock_qemu_info, mock_get_internal_context,
mock_create_from_img_dl, mock_create_from_src,
mock_handle_bootable, mock_fetch_img):
self, mock_qemu_info, mock_volume_get, mock_volume_update,
mock_get_internal_context, mock_create_from_img_dl,
mock_create_from_src, mock_handle_bootable, mock_fetch_img):
mock_get_internal_context.return_value = self.ctxt
mock_fetch_img.return_value = mock.MagicMock(
spec=utils.get_file_spec())

@@ -636,13 +643,7 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):

volume = fake_volume.fake_volume_obj(self.ctxt, size=10,
host='foo@bar#pool')
image_volume = fake_volume.fake_db_volume(size=2)
self.mock_db.volume_create.return_value = image_volume

def update_volume(ctxt, id, updates):
volume.update(updates)
return volume
self.mock_db.volume_update.side_effect = update_volume
mock_volume_get.return_value = volume

image_location = 'someImageLocationStr'
image_id = 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2'

@@ -676,12 +677,8 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):
)

# The volume size should be reduced to virtual_size and then put back
self.mock_db.volume_update.assert_any_call(self.ctxt,
volume['id'],
{'size': 2})
self.mock_db.volume_update.assert_any_call(self.ctxt,
volume['id'],
{'size': 10})
mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 2})
mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 10})

# Make sure created a new cache entry
(self.mock_volume_manager.

@@ -695,9 +692,12 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):
image_meta=image_meta
)

@mock.patch('cinder.db.volume_update')
@mock.patch('cinder.objects.Volume.get_by_id')
@mock.patch('cinder.image.image_utils.qemu_img_info')
def test_create_from_image_cache_miss_error_downloading(
self, mock_qemu_info, mock_get_internal_context,
self, mock_qemu_info, mock_volume_get, mock_volume_update,
mock_get_internal_context,
mock_create_from_img_dl, mock_create_from_src,
mock_handle_bootable, mock_fetch_img):
mock_fetch_img.return_value = mock.MagicMock()

@@ -709,16 +709,10 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):

volume = fake_volume.fake_volume_obj(self.ctxt, size=10,
host='foo@bar#pool')
image_volume = fake_volume.fake_db_volume(size=2)
self.mock_db.volume_create.return_value = image_volume
mock_volume_get.return_value = volume

mock_create_from_img_dl.side_effect = exception.CinderException()

def update_volume(ctxt, id, updates):
volume.update(updates)
return volume
self.mock_db.volume_update.side_effect = update_volume

image_location = 'someImageLocationStr'
image_id = 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2'
image_meta = mock.MagicMock()

@@ -756,13 +750,9 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):

# The volume size should be reduced to virtual_size and then put back,
# especially if there is an exception while creating the volume.
self.assertEqual(2, self.mock_db.volume_update.call_count)
self.mock_db.volume_update.assert_any_call(self.ctxt,
volume['id'],
{'size': 2})
self.mock_db.volume_update.assert_any_call(self.ctxt,
volume['id'],
{'size': 10})
self.assertEqual(2, mock_volume_update.call_count)
mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 2})
mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 10})

# Make sure we didn't try and create a cache entry
self.assertFalse(self.mock_cache.ensure_space.called)

@@ -773,7 +763,7 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):
mock_create_from_src, mock_handle_bootable, mock_fetch_img):
self.mock_driver.clone_image.return_value = (None, False)
mock_get_internal_context.return_value = None
volume = fake_volume.fake_db_volume()
volume = fake_volume.fake_volume_obj(self.ctxt)

image_location = 'someImageLocationStr'
image_id = 'c7a8b8d4-e519-46c7-a0df-ddf1b9b9fff2'
@@ -107,7 +107,8 @@ def check_policy(context, action, target_obj=None):

if isinstance(target_obj, objects_base.CinderObject):
# Turn object into dict so target.update can work
target.update(target_obj.obj_to_primitive() or {})
target.update(
target_obj.obj_to_primitive()['versioned_object.data'] or {})
else:
target.update(target_obj or {})

@@ -310,7 +311,7 @@ class API(base.Base):
'snapshot': snapshot,
'image_id': image_id,
'raw_volume_type': volume_type,
'metadata': metadata,
'metadata': metadata or {},
'raw_availability_zone': availability_zone,
'source_volume': source_volume,
'scheduler_hints': scheduler_hints,
@@ -479,7 +479,8 @@ class EntryCreateTask(flow_utils.CinderTask):
# Merge in the other required arguments which should provide the rest
# of the volume property fields (if applicable).
volume_properties.update(kwargs)
volume = self.db.volume_create(context, volume_properties)
volume = objects.Volume(context=context, **volume_properties)
volume.create()

return {
'volume_id': volume['id'],

@@ -505,16 +506,16 @@ class EntryCreateTask(flow_utils.CinderTask):
# already been created and the quota has already been absorbed.
return

vol_id = result['volume_id']
volume = result['volume']
try:
self.db.volume_destroy(context.elevated(), vol_id)
volume.destroy()
except exception.CinderException:
# We are already reverting, therefore we should silence this
# exception since a second exception being active will be bad.
#
# NOTE(harlowja): Being unable to destroy a volume is pretty
# bad though!!
LOG.exception(_LE("Failed destroying volume entry %s"), vol_id)
LOG.exception(_LE("Failed destroying volume entry %s"), volume.id)


class QuotaReserveTask(flow_utils.CinderTask):

@@ -678,7 +679,7 @@ class VolumeCastTask(flow_utils.CinderTask):

def __init__(self, scheduler_rpcapi, volume_rpcapi, db):
requires = ['image_id', 'scheduler_hints', 'snapshot_id',
'source_volid', 'volume_id', 'volume_type',
'source_volid', 'volume_id', 'volume', 'volume_type',
'volume_properties', 'source_replicaid',
'consistencygroup_id', 'cgsnapshot_id', ]
super(VolumeCastTask, self).__init__(addons=[ACTION],

@@ -691,6 +692,7 @@ class VolumeCastTask(flow_utils.CinderTask):
source_volid = request_spec['source_volid']
source_replicaid = request_spec['source_replicaid']
volume_id = request_spec['volume_id']
volume = request_spec['volume']
snapshot_id = request_spec['snapshot_id']
image_id = request_spec['image_id']
cgroup_id = request_spec['consistencygroup_id']

@@ -714,14 +716,17 @@ class VolumeCastTask(flow_utils.CinderTask):
# snapshot resides instead of passing it through the scheduler, so
# snapshot can be copied to the new volume.
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
source_volume_ref = self.db.volume_get(context, snapshot.volume_id)
host = source_volume_ref['host']
source_volume_ref = objects.Volume.get_by_id(context,
snapshot.volume_id)
host = source_volume_ref.host
elif source_volid:
source_volume_ref = self.db.volume_get(context, source_volid)
host = source_volume_ref['host']
source_volume_ref = objects.Volume.get_by_id(context,
source_volid)
host = source_volume_ref.host
elif source_replicaid:
source_volume_ref = self.db.volume_get(context, source_replicaid)
host = source_volume_ref['host']
source_volume_ref = objects.Volume.get_by_id(context,
source_replicaid)
host = source_volume_ref.host

if not host:
# Cast to the scheduler and let it handle whatever is needed

@@ -733,18 +738,19 @@ class VolumeCastTask(flow_utils.CinderTask):
snapshot_id=snapshot_id,
image_id=image_id,
request_spec=request_spec,
filter_properties=filter_properties)
filter_properties=filter_properties,
volume=volume)
else:
# Bypass the scheduler and send the request directly to the volume
# manager.
now = timeutils.utcnow()
values = {'host': host, 'scheduled_at': now}
volume_ref = self.db.volume_update(context, volume_id, values)
volume.host = host
volume.scheduled_at = timeutils.utcnow()
volume.save()
if not cgsnapshot_id:
self.volume_rpcapi.create_volume(
context,
volume_ref,
volume_ref['host'],
volume,
volume.host,
request_spec,
filter_properties,
allow_reschedule=False)
@ -62,7 +62,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):

    def __init__(self, reschedule_context, db, scheduler_rpcapi,
                 do_reschedule):
        requires = ['filter_properties', 'request_spec', 'volume_id',
        requires = ['filter_properties', 'request_spec', 'volume_ref',
                    'context']
        super(OnFailureRescheduleTask, self).__init__(addons=[ACTION],
                                                      requires=requires)
@ -94,7 +94,7 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
    def execute(self, **kwargs):
        pass

    def _pre_reschedule(self, context, volume_id):
    def _pre_reschedule(self, context, volume):
        """Actions that happen before the rescheduling attempt occur here."""

        try:
@ -112,15 +112,16 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
                'host': None,
            }
            LOG.debug("Updating volume %(volume_id)s with %(update)s.",
                      {'update': update, 'volume_id': volume_id})
            self.db.volume_update(context, volume_id, update)
                      {'update': update, 'volume_id': volume.id})
            volume.update(update)
            volume.save()
        except exception.CinderException:
            # Don't let updating the state cause the rescheduling to fail.
            LOG.exception(_LE("Volume %s: update volume state failed."),
                          volume_id)
                          volume.id)

    def _reschedule(self, context, cause, request_spec, filter_properties,
                    volume_id):
                    volume):
        """Actions that happen during the rescheduling attempt occur here."""

        create_volume = self.scheduler_rpcapi.create_volume
@ -131,11 +132,11 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):

        retry_info = filter_properties['retry']
        num_attempts = retry_info.get('num_attempts', 0)
        request_spec['volume_id'] = volume_id
        request_spec['volume_id'] = volume.id

        LOG.debug("Volume %(volume_id)s: re-scheduling %(method)s "
                  "attempt %(num)d due to %(reason)s",
                  {'volume_id': volume_id,
                  {'volume_id': volume.id,
                   'method': common.make_pretty_name(create_volume),
                   'num': num_attempts,
                   'reason': cause.exception_str})
@ -144,16 +145,17 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
        # Stringify to avoid circular ref problem in json serialization
        retry_info['exc'] = traceback.format_exception(*cause.exc_info)

        return create_volume(context, CONF.volume_topic, volume_id,
        return create_volume(context, CONF.volume_topic, volume.id,
                             request_spec=request_spec,
                             filter_properties=filter_properties)
                             filter_properties=filter_properties,
                             volume=volume)

    def _post_reschedule(self, volume_id):
    def _post_reschedule(self, volume):
        """Actions that happen after the rescheduling attempt occur here."""

        LOG.debug("Volume %s: re-scheduled", volume_id)
        LOG.debug("Volume %s: re-scheduled", volume.id)

    def revert(self, context, result, flow_failures, volume_id, **kwargs):
    def revert(self, context, result, flow_failures, volume_ref, **kwargs):
        # NOTE(dulek): Revert is occurring and the manager needs to know if
        # rescheduling happened. We're returning a boolean flag that will
        # indicate that. It will be available in the flow engine store
@ -162,16 +164,16 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
        # If we do not want to be rescheduled, just set the volume's status to
        # error and return.
        if not self.do_reschedule:
            common.error_out_volume(context, self.db, volume_id)
            LOG.error(_LE("Volume %s: create failed"), volume_id)
            common.error_out_volume(context, self.db, volume_ref.id)
            LOG.error(_LE("Volume %s: create failed"), volume_ref.id)
            return False

        # Check if we have a cause which can tell us not to reschedule and
        # set the volume's status to error.
        for failure in flow_failures.values():
            if failure.check(*self.no_reschedule_types):
                common.error_out_volume(context, self.db, volume_id)
                LOG.error(_LE("Volume %s: create failed"), volume_id)
                common.error_out_volume(context, self.db, volume_ref.id)
                LOG.error(_LE("Volume %s: create failed"), volume_ref.id)
                return False

        # Use a different context when rescheduling.
@ -179,12 +181,13 @@ class OnFailureRescheduleTask(flow_utils.CinderTask):
        cause = list(flow_failures.values())[0]
        context = self.reschedule_context
        try:
            self._pre_reschedule(context, volume_id)
            self._reschedule(context, cause, volume_id=volume_id, **kwargs)
            self._post_reschedule(volume_id)
            self._pre_reschedule(context, volume_ref)
            self._reschedule(context, cause, volume=volume_ref, **kwargs)
            self._post_reschedule(volume_ref)
            return True
        except exception.CinderException:
            LOG.exception(_LE("Volume %s: rescheduling failed"), volume_id)
            LOG.exception(_LE("Volume %s: rescheduling failed"),
                          volume_ref.id)

        return False
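
OnFailureRescheduleTask.revert() now receives the volume object and returns a boolean so the manager knows whether a reschedule was attempted. A standalone sketch of just that decision, with invented stand-ins for taskflow's failure objects and the task's no_reschedule_types tuple:

class FakeFailure(object):
    """Stand-in for a taskflow failure; only the check() shape matters here."""

    def __init__(self, exc_type):
        self.exc_type = exc_type

    def check(self, *exc_types):
        return self.exc_type in exc_types


class QuotaError(Exception):
    pass


NO_RESCHEDULE_TYPES = (QuotaError,)


def should_reschedule(do_reschedule, flow_failures):
    """Mirror the revert() logic: False means error out and do not retry."""
    if not do_reschedule:
        return False
    return not any(f.check(*NO_RESCHEDULE_TYPES)
                   for f in flow_failures.values())


print(should_reschedule(True, {'t1': FakeFailure(ValueError)}))   # True
print(should_reschedule(True, {'t1': FakeFailure(QuotaError)}))   # False
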
@ -206,8 +209,7 @@ class ExtractVolumeRefTask(flow_utils.CinderTask):
        #
        # In the future we might want to have a lock on the volume_id so that
        # the volume can not be deleted while its still being created?
        volume_ref = self.db.volume_get(context, volume_id)
        return volume_ref
        return objects.Volume.get_by_id(context, volume_id)

    def revert(self, context, volume_id, result, **kwargs):
        if isinstance(result, ft.Failure) or not self.set_error:
@ -269,7 +271,8 @@ class ExtractVolumeSpecTask(flow_utils.CinderTask):
            # NOTE(harlowja): This will likely fail if the source volume
            # disappeared by the time this call occurred.
            source_volid = volume_ref.get('source_volid')
            source_volume_ref = self.db.volume_get(context, source_volid)
            source_volume_ref = objects.Volume.get_by_id(context,
                                                         source_volid)
            specs.update({
                'source_volid': source_volid,
                # This is captured incase we have to revert and we want to set
@ -284,7 +287,8 @@ class ExtractVolumeSpecTask(flow_utils.CinderTask):
            # NOTE(harlowja): This will likely fail if the replica
            # disappeared by the time this call occurred.
            source_volid = request_spec['source_replicaid']
            source_volume_ref = self.db.volume_get(context, source_volid)
            source_volume_ref = objects.Volume.get_by_id(context,
                                                         source_volid)
            specs.update({
                'source_replicaid': source_volid,
                'source_replicastatus': source_volume_ref['status'],
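
ExtractVolumeRefTask now hands back objects.Volume.get_by_id(context, volume_id) instead of a raw DB row, so downstream tasks all work with one object type. A minimal, self-contained version of that classmethod pattern (the in-memory FAKE_DB is invented for illustration, it is not Cinder's DB API):

FAKE_DB = {'vol-1': {'id': 'vol-1', 'status': 'creating', 'size': 1}}


class Volume(object):
    def __init__(self, context, **fields):
        self._context = context
        for name, value in fields.items():
            setattr(self, name, value)

    @classmethod
    def get_by_id(cls, context, volume_id):
        # Stand-in for the db.volume_get() call the object hides from callers.
        row = FAKE_DB[volume_id]
        return cls(context, **row)


vol = Volume.get_by_id(context=None, volume_id='vol-1')
print(vol.status, vol.size)
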
@ -443,8 +447,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
        # will not destroy the volume (although they could in the future).
        make_bootable = False
        try:
            originating_vref = self.db.volume_get(context,
                                                  snapshot.volume_id)
            originating_vref = objects.Volume.get_by_id(context,
                                                        snapshot.volume_id)
            make_bootable = originating_vref.bootable
        except exception.CinderException as ex:
            LOG.exception(_LE("Failed fetching snapshot %(snapshot_id)s "
@ -476,14 +480,14 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
        # NOTE(harlowja): likely this is not the best place for this to happen
        # and we should have proper locks on the source volume while actions
        # that use the source volume are underway.
        srcvol_ref = self.db.volume_get(context, source_volid)
        srcvol_ref = objects.Volume.get_by_id(context, source_volid)
        model_update = self.driver.create_cloned_volume(volume_ref, srcvol_ref)
        # NOTE(harlowja): Subtasks would be useful here since after this
        # point the volume has already been created and further failures
        # will not destroy the volume (although they could in the future).
        if srcvol_ref.bootable:
            self._handle_bootable_volume_glance_meta(context, volume_ref['id'],
                                                     source_volid=source_volid)
            self._handle_bootable_volume_glance_meta(
                context, volume_ref.id, source_volid=source_volid)
        return model_update

    def _create_from_source_replica(self, context, volume_ref,
@ -494,7 +498,7 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
        # NOTE(harlowja): likely this is not the best place for this to happen
        # and we should have proper locks on the source volume while actions
        # that use the source volume are underway.
        srcvol_ref = self.db.volume_get(context, source_replicaid)
        srcvol_ref = objects.Volume.get_by_id(context, source_replicaid)
        model_update = self.driver.create_replica_test_volume(volume_ref,
                                                              srcvol_ref)
        # NOTE(harlowja): Subtasks would be useful here since after this
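
Note that the converted code freely mixes item access (source_volume_ref['status']) with attribute access (srcvol_ref.bootable); Cinder's objects tolerate both because they mix in a dict-compat layer (oslo.versionedobjects ships one as VersionedObjectDictCompat). A toy equivalent of that mixin, written from scratch for illustration:

class DictCompat(object):
    """Tiny stand-in for a dict-compat mixin: item access maps to attributes."""

    def __getitem__(self, name):
        return getattr(self, name)

    def __setitem__(self, name, value):
        setattr(self, name, value)

    def get(self, name, default=None):
        return getattr(self, name, default)


class Volume(DictCompat):
    def __init__(self, **fields):
        self.__dict__.update(fields)


vol = Volume(id='vol-1', bootable=True, status='available')
print(vol['status'], vol.bootable, vol.get('missing', 'n/a'))
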
@ -754,12 +758,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
                    image_id=image_id, reason=reason)

            if virtual_size and virtual_size != original_size:
                updates = {'size': virtual_size}
                volume_ref = self.db.volume_update(
                    context,
                    volume_ref['id'],
                    updates
                )
                volume_ref.size = virtual_size
                volume_ref.save()

            model_update = self._create_from_image_download(
                context,
@ -773,9 +773,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
                # Update the newly created volume db entry before we clone it
                # for the image-volume creation.
                if model_update:
                    volume_ref = self.db.volume_update(context,
                                                       volume_ref['id'],
                                                       model_update)
                    volume_ref.update(model_update)
                    volume_ref.save()
                self.manager._create_image_cache_volume_entry(internal_context,
                                                              volume_ref,
                                                              image_id,
@ -785,12 +784,12 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
            # what was originally requested. If an exception has occurred we
            # still need to put this back before letting it be raised further
            # up the stack.
            if volume_ref['size'] != original_size:
            if volume_ref.size != original_size:
                self.driver.extend_volume(volume_ref, original_size)
                updates = {'size': original_size}
                self.db.volume_update(context, volume_ref['id'], updates)
                volume_ref.size = original_size
                volume_ref.save()

        self._handle_bootable_volume_glance_meta(context, volume_ref['id'],
        self._handle_bootable_volume_glance_meta(context, volume_ref.id,
                                                 image_id=image_id,
                                                 image_meta=image_meta)
        return model_update
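
The image path above temporarily grows the volume to the image's virtual size and then restores the requested size, even when the download fails. A compact sketch of that intent using stand-in volume and driver classes and a hypothetical download callable (these are not Cinder's actual helpers):

class FakeVolume(object):
    def __init__(self, size):
        self.size = size

    def save(self):
        print('saved size=%d' % self.size)


class FakeDriver(object):
    def extend_volume(self, volume, new_size):
        print('resizing to %d' % new_size)


def create_from_image(volume, driver, download, original_size, virtual_size):
    # If the image's virtual size differs, adjust temporarily for the download.
    if virtual_size and virtual_size != original_size:
        volume.size = virtual_size
        volume.save()
    try:
        return download(volume)
    finally:
        # Mirror the diff: restore the requested size even if download() raised.
        if volume.size != original_size:
            driver.extend_volume(volume, original_size)
            volume.size = original_size
            volume.save()


create_from_image(FakeVolume(1), FakeDriver(),
                  download=lambda v: 'model-update',
                  original_size=1, virtual_size=3)
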
@ -839,8 +838,8 @@ class CreateVolumeFromSpecTask(flow_utils.CinderTask):
        # Persist any model information provided on creation.
        try:
            if model_update:
                volume_ref = self.db.volume_update(context, volume_ref['id'],
                                                   model_update)
                volume_ref.update(model_update)
                volume_ref.save()
        except exception.CinderException:
            # If somehow the update failed we want to ensure that the
            # failure is logged (but not try rescheduling since the volume at
@ -872,7 +871,6 @@ class CreateVolumeOnFinishTask(NotifyVolumeActionTask):
        }

    def execute(self, context, volume, volume_spec):
        volume_id = volume['id']
        new_status = self.status_translation.get(volume_spec.get('status'),
                                                 'available')
        update = {
@ -884,18 +882,19 @@ class CreateVolumeOnFinishTask(NotifyVolumeActionTask):
            # or are there other side-effects that this will cause if the
            # status isn't updated correctly (aka it will likely be stuck in
            # 'creating' if this fails)??
            volume_ref = self.db.volume_update(context, volume_id, update)
            volume.update(update)
            volume.save()
            # Now use the parent to notify.
            super(CreateVolumeOnFinishTask, self).execute(context, volume_ref)
            super(CreateVolumeOnFinishTask, self).execute(context, volume)
        except exception.CinderException:
            LOG.exception(_LE("Failed updating volume %(volume_id)s with "
                              "%(update)s"), {'volume_id': volume_id,
                              "%(update)s"), {'volume_id': volume.id,
                                              'update': update})
        # Even if the update fails, the volume is ready.
        LOG.info(_LI("Volume %(volume_name)s (%(volume_id)s): "
                     "created successfully"),
                 {'volume_name': volume_spec['volume_name'],
                  'volume_id': volume_id})
                  'volume_id': volume.id})


def get_flow(context, manager, db, driver, scheduler_rpcapi, host, volume_id,
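
get_flow() wires these tasks into a taskflow linear flow, which is why the volume/volume_ref renames above matter: task inputs are resolved by name from the flow's store. A minimal runnable flow in the same spirit (assuming the taskflow package is installed; the task names and store keys are invented, not Cinder's):

from taskflow import engines
from taskflow import task
from taskflow.patterns import linear_flow


class ExtractRef(task.Task):
    default_provides = 'volume_ref'

    def execute(self, volume_id):
        # Stand-in for objects.Volume.get_by_id(context, volume_id).
        return {'id': volume_id, 'status': 'creating'}


class MarkAvailable(task.Task):
    def execute(self, volume_ref):
        volume_ref['status'] = 'available'
        print('volume %s is %s' % (volume_ref['id'], volume_ref['status']))


flow = linear_flow.Flow('demo_create_volume').add(ExtractRef(),
                                                  MarkAvailable())
engines.run(flow, store={'volume_id': 'vol-1'})
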
@ -190,7 +190,7 @@ def locked_snapshot_operation(f):
class VolumeManager(manager.SchedulerDependentManager):
    """Manages attachable block storage devices."""

    RPC_API_VERSION = '1.31'
    RPC_API_VERSION = '1.32'

    target = messaging.Target(version=RPC_API_VERSION)

@ -476,9 +476,16 @@ class VolumeManager(manager.SchedulerDependentManager):
        return self.driver.initialized

    def create_volume(self, context, volume_id, request_spec=None,
                      filter_properties=None, allow_reschedule=True):
                      filter_properties=None, allow_reschedule=True,
                      volume=None):

        """Creates the volume."""
        # FIXME(thangp): Remove this in v2.0 of RPC API.
        if volume is None:
            # For older clients, mimic the old behavior and look up the volume
            # by its volume_id.
            volume = objects.Volume.get_by_id(context, volume_id)

        context_elevated = context.elevated()
        if filter_properties is None:
            filter_properties = {}
@ -496,7 +503,7 @@ class VolumeManager(manager.SchedulerDependentManager):
                self.driver,
                self.scheduler_rpcapi,
                self.host,
                volume_id,
                volume.id,
                allow_reschedule,
                context,
                request_spec,
@ -505,7 +512,7 @@ class VolumeManager(manager.SchedulerDependentManager):
            )
        except Exception:
            msg = _("Create manager volume flow failed.")
            LOG.exception(msg, resource={'type': 'volume', 'id': volume_id})
            LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
            raise exception.CinderException(msg)

        snapshot_id = request_spec.get('snapshot_id')
@ -563,13 +570,13 @@ class VolumeManager(manager.SchedulerDependentManager):
                if not vol_ref:
                    # Flow was reverted and not rescheduled, fetching
                    # volume_ref from the DB, because it will be needed.
                    vol_ref = self.db.volume_get(context, volume_id)
                    vol_ref = objects.Volume.get_by_id(context, volume.id)
                # NOTE(dulek): Volume wasn't rescheduled so we need to update
                # volume stats as these are decremented on delete.
                self._update_allocated_capacity(vol_ref)

        LOG.info(_LI("Created volume successfully."), resource=vol_ref)
        return vol_ref['id']
        return vol_ref.id
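
The new volume= parameter keeps the manager handler compatible with older callers that still send only volume_id: when the object is missing, it is fetched by id. A stripped-down version of that guard, with a stand-in object class rather than cinder.objects.Volume:

class FakeVolumeObj(object):
    def __init__(self, volume_id):
        self.id = volume_id

    @classmethod
    def get_by_id(cls, context, volume_id):
        return cls(volume_id)


def create_volume(context, volume_id, volume=None):
    # Older RPC clients (pre-1.32 in the diff) only pass volume_id.
    if volume is None:
        volume = FakeVolumeObj.get_by_id(context, volume_id)
    return volume.id


print(create_volume(None, 'vol-1'))                          # old-style call
print(create_volume(None, 'vol-1', FakeVolumeObj('vol-1')))  # new-style call
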
    @locked_volume_operation
    def delete_volume(self, context, volume_id, unmanage_only=False):
@ -1586,9 +1593,10 @@ class VolumeManager(manager.SchedulerDependentManager):
        new_vol_values = dict(volume)
        del new_vol_values['id']
        del new_vol_values['_name_id']
        new_vol_values.pop('name', None)
        # We don't copy volume_type because the db sets that according to
        # volume_type_id, which we do copy
        del new_vol_values['volume_type']
        new_vol_values.pop('volume_type', None)
        if new_type_id:
            new_vol_values['volume_type_id'] = new_type_id
        new_vol_values['host'] = host['host']
@ -1600,8 +1608,9 @@ class VolumeManager(manager.SchedulerDependentManager):
        # I think
        new_vol_values['migration_status'] = 'target:%s' % volume['id']
        new_vol_values['attach_status'] = 'detached'
        new_vol_values['volume_attachment'] = []
        new_volume = self.db.volume_create(ctxt, new_vol_values)
        new_vol_values.pop('volume_attachment', None)
        new_volume = objects.Volume(context=ctxt, **new_vol_values)
        new_volume.create()
        rpcapi.create_volume(ctxt, new_volume, host['host'],
                             None, None, allow_reschedule=False)
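
The migration helper now builds the destination volume as an object and calls create(), rather than passing a values dict to db.volume_create(). A toy version of that construct-then-create step (the in-memory FAKE_DB stands in for the real database layer):

FAKE_DB = {}


class Volume(object):
    def __init__(self, context=None, **fields):
        self._context = context
        self.fields = dict(fields)

    def create(self):
        # Stand-in for the INSERT the real object issues through the DB API.
        FAKE_DB[self.fields['id']] = self.fields


new_vol_values = {'id': 'vol-2', 'host': 'dest-host@lvm',
                  'migration_status': 'target:vol-1',
                  'attach_status': 'detached'}
new_volume = Volume(context=None, **new_vol_values)
new_volume.create()
print(sorted(FAKE_DB['vol-2']))
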
@ -79,6 +79,7 @@ class VolumeAPI(object):
        1.31 - Updated: create_consistencygroup_from_src(), create_cgsnapshot()
               and delete_cgsnapshot() to cast method only with necessary
               args. Forwarding CGSnapshot object instead of CGSnapshot_id.
        1.32 - Adds support for sending objects over RPC in create_volume().
    """

    BASE_RPC_API_VERSION = '1.0'
@ -88,7 +89,11 @@ class VolumeAPI(object):
        target = messaging.Target(topic=CONF.volume_topic,
                                  version=self.BASE_RPC_API_VERSION)
        serializer = objects_base.CinderObjectSerializer()
        self.client = rpc.get_client(target, '1.31', serializer=serializer)

        # NOTE(thangp): Until version pinning is implemented, set the client
        # version_cap to None
        self.client = rpc.get_client(target, version_cap=None,
                                     serializer=serializer)

    def create_consistencygroup(self, ctxt, group, host):
        new_host = utils.extract_host(host)
@ -132,14 +137,20 @@ class VolumeAPI(object):

    def create_volume(self, ctxt, volume, host, request_spec,
                      filter_properties, allow_reschedule=True):
        new_host = utils.extract_host(host)
        cctxt = self.client.prepare(server=new_host, version='1.24')
        request_spec_p = jsonutils.to_primitive(request_spec)
        cctxt.cast(ctxt, 'create_volume',
                   volume_id=volume['id'],
                   request_spec=request_spec_p,
                   filter_properties=filter_properties,
                   allow_reschedule=allow_reschedule)
        request_spec_p = jsonutils.to_primitive(request_spec)
        msg_args = {'volume_id': volume.id, 'request_spec': request_spec_p,
                    'filter_properties': filter_properties,
                    'allow_reschedule': allow_reschedule}
        if self.client.can_send_version('1.32'):
            version = '1.32'
            msg_args['volume'] = volume
        else:
            version = '1.24'

        new_host = utils.extract_host(host)
        cctxt = self.client.prepare(server=new_host, version=version)
        cctxt.cast(ctxt, 'create_volume', **msg_args)

    def delete_volume(self, ctxt, volume, unmanage_only=False):
        new_host = utils.extract_host(volume['host'])
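
On the client side this is the usual version-cap negotiation: send the newest message shape when the cap allows it, otherwise fall back to the 1.24 form that only carries volume_id. A self-contained sketch of that pattern with a toy client (this is not oslo.messaging; the real can_send_version() consults the negotiated or pinned cap):

def _as_tuple(version):
    return tuple(int(part) for part in version.split('.'))


class FakeRPCClient(object):
    def __init__(self, version_cap=None):
        self.version_cap = version_cap

    def can_send_version(self, version):
        # Numeric tuple comparison against the cap; no cap means anything goes.
        if self.version_cap is None:
            return True
        return _as_tuple(version) <= _as_tuple(self.version_cap)


def cast_create_volume(client, volume):
    msg_args = {'volume_id': volume['id']}
    if client.can_send_version('1.32'):
        version = '1.32'
        msg_args['volume'] = volume       # new peers get the full object
    else:
        version = '1.24'                  # old peers only get the id
    print('cast create_volume version=%s args=%s' % (version,
                                                     sorted(msg_args)))


cast_create_volume(FakeRPCClient(), {'id': 'vol-1'})
cast_create_volume(FakeRPCClient(version_cap='1.24'), {'id': 'vol-1'})
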
@ -76,6 +76,8 @@ objects_ignore_messages = [
    "Module 'cinder.objects' has no 'ServiceList' member",
    "Module 'cinder.objects' has no 'Snapshot' member",
    "Module 'cinder.objects' has no 'SnapshotList' member",
    "Module 'cinder.objects' has no 'Volume' member",
    "Module 'cinder.objects' has no 'VolumeList' member",
]
objects_ignore_modules = ["cinder/objects/"]
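
The two new pylint suppressions follow the existing pattern: Volume and VolumeList are attached to the cinder.objects namespace at runtime by a registration hook, so static analysis cannot see them. A tiny illustration of why a linter loses track of such members (the module and names here are made up):

import types

# objects_pkg is a stand-in namespace; the real code registers classes
# onto the cinder.objects module when register_all() runs at import time.
objects_pkg = types.ModuleType('objects_pkg')


class Volume(object):
    pass


def register_all():
    # Attach classes dynamically; a linter only ever sees an empty module.
    setattr(objects_pkg, 'Volume', Volume)


register_all()
print(objects_pkg.Volume)  # resolves at runtime, invisible to static analysis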