Cinder volume revert to snapshot
This patch implements the spec for reverting a volume to its latest snapshot.

Related tempest and client patches:
[1] https://review.openstack.org/#/c/463906/
[2] https://review.openstack.org/#/c/464903/

APIImpact
DocImpact
Partial-Implements: blueprint revert-volume-to-snapshot
Change-Id: Ib20d749c2118c350b5fa0361ed1811296d518a17
parent 54affb4437
commit 8fba9a9080
@@ -1697,6 +1697,12 @@ restore:
   in: body
   required: true
   type: object
+revert:
+  description: |
+    The ``revert`` action.
+  in: body
+  required: true
+  type: object
 safe_to_manage:
   description: |
     If the resource can be managed or not.
@@ -1776,6 +1782,13 @@ snapshot_id_3:
   in: body
   required: true
   type: string
+snapshot_id_4:
+  description: |
+    The UUID of the snapshot. The API
+    reverts the volume with this snapshot.
+  in: body
+  required: true
+  type: string
 source-name:
   description: |
     The resource's name.
@@ -0,0 +1,5 @@
+{
+    "revert": {
+        "snapshot_id": "5aa119a8-d25b-45a7-8d1b-88e127885635"
+    }
+}
@@ -62,7 +62,8 @@ Reset a volume's statuses

 .. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action

-Administrator only. Resets the status, attach status, and migration status for a volume. Specify the ``os-reset_status`` action in the request body.
+Administrator only. Resets the status, attach status, revert to snapshot,
+and migration status for a volume. Specify the ``os-reset_status`` action in the request body.

 Normal response codes: 202

@@ -86,9 +87,31 @@ Request Example
    :language: javascript


+Revert volume to snapshot
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. rest_method:: POST /v3/{project_id}/volumes/{volume_id}/action
+
+Revert a volume to its latest snapshot. This API only supports reverting a detached volume.
+
+Normal response codes: 202
+Error response codes: 400, 404
+
+Request
+-------
+
+.. rest_parameters:: parameters.yaml
+
+   - project_id: project_id_path
+   - volume_id: volume_id
+   - revert: revert
+   - snapshot_id: snapshot_id_4
+
+Request Example
+---------------
+
+.. literalinclude:: ./samples/volume-revert-to-snapshot-request.json
+   :language: javascript


 Set image metadata for a volume
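For context, the new action is an ordinary volume action POST. Below is a minimal client sketch using Python's requests library; the endpoint, token, and IDs are placeholder assumptions, the snapshot must be the volume's latest one, and the volume must be detached:

    import json

    import requests

    # Placeholder values for illustration only.
    endpoint = 'http://controller:8776/v3/<project-id>'
    token = '<keystone-token>'
    volume_id = '<volume-uuid>'
    snapshot_id = '5aa119a8-d25b-45a7-8d1b-88e127885635'

    resp = requests.post(
        '%s/volumes/%s/action' % (endpoint, volume_id),
        headers={'X-Auth-Token': token,
                 # The action only exists from microversion 3.40 onwards.
                 'OpenStack-API-Version': 'volume 3.40',
                 'Content-Type': 'application/json'},
        data=json.dumps({'revert': {'snapshot_id': snapshot_id}}))

    # 202 means the revert was accepted and runs asynchronously;
    # poll the volume until its status returns to 'available'.
    assert resp.status_code == 202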
@@ -93,6 +93,8 @@ REST_API_VERSION_HISTORY = """
    * 3.37 - Support sort backup by "name".
    * 3.38 - Add replication group API (Tiramisu).
    * 3.39 - Add ``project_id`` admin filters support to limits.
+   * 3.40 - Add volume revert to its latest snapshot support.
+
 """

 # The minimum and maximum versions of the API supported
@@ -100,7 +102,7 @@ REST_API_VERSION_HISTORY = """
 # minimum version of the API supported.
 # Explicitly using /v1 or /v2 endpoints will still work
 _MIN_API_VERSION = "3.0"
-_MAX_API_VERSION = "3.39"
+_MAX_API_VERSION = "3.40"
 _LEGACY_API_VERSION1 = "1.0"
 _LEGACY_API_VERSION2 = "2.0"
@@ -334,3 +334,7 @@ user documentation.
 3.39
 ----
 Add ``project_id`` admin filters support to limits.
+
+3.40
+----
+Add volume revert to its latest snapshot support.
@@ -15,6 +15,7 @@

 from oslo_log import log as logging
 from oslo_utils import uuidutils
+import six
 from six.moves import http_client
 import webob
 from webob import exc
@@ -23,6 +24,7 @@ from cinder.api import common
 from cinder.api.openstack import wsgi
 from cinder.api.v2 import volumes as volumes_v2
 from cinder.api.v3.views import volumes as volume_views_v3
+from cinder import exception
 from cinder import group as group_api
 from cinder.i18n import _
 from cinder import objects
@@ -160,6 +162,37 @@ class VolumeController(volumes_v2.VolumeController):
         return view_builder_v3.quick_summary(num_vols, int(sum_size),
                                              all_distinct_metadata)

+    @wsgi.response(http_client.ACCEPTED)
+    @wsgi.Controller.api_version('3.40')
+    @wsgi.action('revert')
+    def revert(self, req, id, body):
+        """Revert a volume to a snapshot."""
+
+        context = req.environ['cinder.context']
+        self.assert_valid_body(body, 'revert')
+        snapshot_id = body['revert'].get('snapshot_id')
+        volume = self.volume_api.get_volume(context, id)
+        try:
+            l_snap = volume.get_latest_snapshot()
+        except exception.VolumeSnapshotNotFound:
+            msg = _("Volume %s doesn't have any snapshots.")
+            raise exc.HTTPBadRequest(explanation=msg % volume.id)
+        # Ensure the requested snapshot is the volume's latest one.
+        if snapshot_id is None or snapshot_id != l_snap.id:
+            msg = _("Specified snapshot %(s_id)s is None or not "
+                    "the latest one of volume %(v_id)s.")
+            raise exc.HTTPBadRequest(explanation=msg % {'s_id': snapshot_id,
+                                                        'v_id': volume.id})
+        try:
+            msg = 'Reverting volume %(v_id)s to snapshot %(s_id)s.'
+            LOG.info(msg, {'v_id': volume.id,
+                           's_id': l_snap.id})
+            self.volume_api.revert_to_snapshot(context, volume, l_snap)
+        except (exception.InvalidVolume, exception.InvalidSnapshot) as e:
+            raise exc.HTTPConflict(explanation=six.text_type(e))
+        except exception.VolumeSizeExceedsAvailableQuota as e:
+            raise exc.HTTPForbidden(explanation=six.text_type(e))
+
     @wsgi.response(http_client.ACCEPTED)
     def create(self, req, body):
         """Creates a new volume.
@@ -740,14 +740,21 @@ class LVM(executor.Executor):
                       'udev settle.', name)

     def revert(self, snapshot_name):
-        """Revert an LV from snapshot.
+        """Revert an LV to snapshot.

         :param snapshot_name: Name of snapshot to revert

         """
-        self._execute('lvconvert', '--merge',
-                      snapshot_name, root_helper=self._root_helper,
-                      run_as_root=True)
+        cmd = ['lvconvert', '--merge', '%s/%s' % (self.vg_name, snapshot_name)]
+        try:
+            self._execute(*cmd, root_helper=self._root_helper,
+                          run_as_root=True)
+        except putils.ProcessExecutionError as err:
+            LOG.exception('Error Revert Volume')
+            LOG.error('Cmd :%s', err.cmd)
+            LOG.error('StdOut :%s', err.stdout)
+            LOG.error('StdErr :%s', err.stderr)
+            raise

     def lv_has_snapshot(self, name):
         cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
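As background, `lvconvert --merge` folds the snapshot back into its origin LV and consumes the snapshot in the process, which is why the LVM driver hunk further down recreates the snapshot after reverting. A hedged sketch of driving this helper directly, with the VG name, rootwrap command, and snapshot LV name assumed:

    from cinder.brick.local_dev import lvm as brick_lvm

    # Assumed values: a typical devstack VG name and rootwrap helper.
    vg = brick_lvm.LVM(
        'stack-volumes-lvmdriver-1',
        root_helper='sudo cinder-rootwrap /etc/cinder/rootwrap.conf')
    # Merge the snapshot LV back into its origin; the snapshot LV
    # disappears once the merge completes.
    vg.revert('_snapshot-5aa119a8-d25b-45a7-8d1b-88e127885635')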
@@ -467,6 +467,11 @@ def snapshot_get_all_for_volume(context, volume_id):
     return IMPL.snapshot_get_all_for_volume(context, volume_id)


+def snapshot_get_latest_for_volume(context, volume_id):
+    """Get latest snapshot for a volume"""
+    return IMPL.snapshot_get_latest_for_volume(context, volume_id)
+
+
 def snapshot_update(context, snapshot_id, values):
     """Set the given properties on an snapshot and update it.
@@ -3028,6 +3028,19 @@ def snapshot_get_all_for_volume(context, volume_id):
         all()


+@require_context
+def snapshot_get_latest_for_volume(context, volume_id):
+    result = model_query(context, models.Snapshot, read_deleted='no',
+                         project_only=True).\
+        filter_by(volume_id=volume_id).\
+        options(joinedload('snapshot_metadata')).\
+        order_by(desc(models.Snapshot.created_at)).\
+        first()
+    if not result:
+        raise exception.VolumeSnapshotNotFound(volume_id=volume_id)
+    return result
+
+
 @require_context
 def snapshot_get_all_by_host(context, host, filters=None):
     if filters and not is_valid_model_filters(models.Snapshot, filters):
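The helper orders the volume's undeleted snapshots by ``created_at`` descending and takes the first row, raising the new ``VolumeSnapshotNotFound`` when none exist. A hedged usage sketch (the admin context and volume ID are assumptions):

    from cinder import context
    from cinder import db
    from cinder import exception

    ctxt = context.get_admin_context()
    try:
        latest = db.snapshot_get_latest_for_volume(ctxt, '<volume-uuid>')
    except exception.VolumeSnapshotNotFound:
        # No snapshots exist; the REST layer maps this to HTTP 400.
        latest = None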
@@ -402,6 +402,10 @@ class ServerNotFound(NotFound):
     message = _("Instance %(uuid)s could not be found.")


+class VolumeSnapshotNotFound(NotFound):
+    message = _("No snapshots found for volume %(volume_id)s.")
+
+
 class VolumeIsBusy(CinderException):
     message = _("deleting volume %(volume_name)s that has snapshot")

@@ -1199,6 +1203,10 @@ class BadHTTPResponseStatus(VolumeDriverException):
     message = _("Bad HTTP response status %(status)s")


+class BadResetResourceStatus(CinderException):
+    message = _("Bad reset resource status : %(message)s")
+
+
 # ZADARA STORAGE VPSA driver exception
 class ZadaraServerCreateFailure(VolumeDriverException):
     message = _("Unable to create server object for initiator %(name)s")
@@ -55,3 +55,16 @@ class VolumeSnapshotDriver(base.CinderInterface):
         :param snapshot: The snapshot from which to create the volume.
         :returns: A dict of database updates for the new volume.
         """
+
+    def revert_to_snapshot(self, context, volume, snapshot):
+        """Revert volume to snapshot.
+
+        Note: the revert process should not change the volume's
+        current size, that means if the driver shrank
+        the volume during the process, it should extend the
+        volume internally.
+
+        :param context: the context of the caller.
+        :param volume: The volume to be reverted.
+        :param snapshot: The snapshot used for reverting.
+        """
@@ -133,6 +133,7 @@ OBJ_VERSIONS.add('1.22', {'Snapshot': '1.4'})
 OBJ_VERSIONS.add('1.23', {'VolumeAttachment': '1.2'})
 OBJ_VERSIONS.add('1.24', {'LogLevel': '1.0', 'LogLevelList': '1.0'})
 OBJ_VERSIONS.add('1.25', {'Group': '1.2'})
+OBJ_VERSIONS.add('1.26', {'Snapshot': '1.5'})


 class CinderObjectRegistry(base.VersionedObjectRegistry):
@@ -309,6 +310,12 @@ class CinderPersistentObject(object):
         kargs = {'expected_attrs': expected_attrs}
         return cls._from_db_object(context, cls(context), orm_obj, **kargs)

+    def update_single_status_where(self, new_status,
+                                   expected_status, filters=()):
+        values = {'status': new_status}
+        expected_status = {'status': expected_status}
+        return self.conditional_update(values, expected_status, filters)
+
     def conditional_update(self, values, expected_values=None, filters=(),
                            save_all=False, session=None, reflect_changes=True,
                            order=None):
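``update_single_status_where`` is a thin compare-and-swap wrapper over ``conditional_update``: the status only changes if the row still holds the expected value, so two racing revert requests cannot both claim the same volume. A hedged sketch of the pattern as the volume API layer uses it (the function name is invented for illustration):

    from cinder import exception

    def claim_for_revert(volume):
        """Illustrative only: atomically move 'available' -> 'reverting'."""
        # Compiles to a single UPDATE ... WHERE status='available';
        # returns falsy when the row was no longer 'available', i.e.
        # another request won the race.
        if not volume.update_single_status_where('reverting', 'available'):
            raise exception.InvalidVolume(
                reason="volume status must be 'available'")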
@@ -126,9 +126,10 @@ class SnapshotStatus(BaseCinderEnum):
     ERROR_DELETING = 'error_deleting'
     UNMANAGING = 'unmanaging'
     BACKING_UP = 'backing-up'
+    RESTORING = 'restoring'

     ALL = (ERROR, AVAILABLE, CREATING, DELETING, DELETED,
-           UPDATING, ERROR_DELETING, UNMANAGING, BACKING_UP)
+           UPDATING, ERROR_DELETING, UNMANAGING, BACKING_UP, RESTORING)


 class SnapshotStatusField(BaseEnumField):
@@ -37,7 +37,8 @@ class Snapshot(cleanable.CinderCleanableObject, base.CinderObject,
     # Version 1.2: This object is now cleanable (adds rows to workers table)
     # Version 1.3: SnapshotStatusField now includes "unmanaging"
     # Version 1.4: SnapshotStatusField now includes "backing-up"
-    VERSION = '1.4'
+    # Version 1.5: SnapshotStatusField now includes "restoring"
+    VERSION = '1.5'

     # NOTE(thangp): OPTIONAL_FIELDS are fields that would be lazy-loaded. They
     # are typically the relationship in the sqlalchemy object.
@@ -119,12 +120,19 @@ class Snapshot(cleanable.CinderCleanableObject, base.CinderObject,
         super(Snapshot, self).obj_make_compatible(primitive, target_version)
         target_version = versionutils.convert_version_to_tuple(target_version)

-        if target_version < (1, 3):
-            if primitive.get('status') == c_fields.SnapshotStatus.UNMANAGING:
-                primitive['status'] = c_fields.SnapshotStatus.DELETING
-        if target_version < (1, 4):
-            if primitive.get('status') == c_fields.SnapshotStatus.BACKING_UP:
-                primitive['status'] = c_fields.SnapshotStatus.AVAILABLE
+        backport_statuses = (((1, 3),
+                              (c_fields.SnapshotStatus.UNMANAGING,
+                               c_fields.SnapshotStatus.DELETING)),
+                             ((1, 4),
+                              (c_fields.SnapshotStatus.BACKING_UP,
+                               c_fields.SnapshotStatus.AVAILABLE)),
+                             ((1, 5),
+                              (c_fields.SnapshotStatus.RESTORING,
+                               c_fields.SnapshotStatus.AVAILABLE)))
+        for version, status in backport_statuses:
+            if target_version < version:
+                if primitive.get('status') == status[0]:
+                    primitive['status'] = status[1]

     @classmethod
     def _from_db_object(cls, context, snapshot, db_snapshot,
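The table-driven rewrite makes backports mechanical: any status an older Snapshot version does not know is degraded to one it does. A self-contained sketch of the same logic with plain strings (not the real object plumbing):

    # Standalone illustration: a 'restoring' snapshot serialized for a
    # node pinned at Snapshot 1.4 degrades to 'available'.
    backport_statuses = (((1, 3), ('unmanaging', 'deleting')),
                         ((1, 4), ('backing-up', 'available')),
                         ((1, 5), ('restoring', 'available')))

    def backport_status(primitive, target_version):
        for version, (newer, older) in backport_statuses:
            if target_version < version and primitive.get('status') == newer:
                primitive['status'] = older
        return primitive

    print(backport_status({'status': 'restoring'}, (1, 4)))
    # -> {'status': 'available'}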
@@ -510,6 +510,13 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject,
         dest_volume.save()
         return dest_volume

+    def get_latest_snapshot(self):
+        """Get volume's latest snapshot"""
+        snapshot_db = db.snapshot_get_latest_for_volume(self._context, self.id)
+        snapshot = objects.Snapshot(self._context)
+        return snapshot._from_db_object(self._context,
+                                        snapshot, snapshot_db)
+
     @staticmethod
     def _is_cleanable(status, obj_version):
         # Before 1.6 we didn't have workers table, so cleanup wasn't supported.
@@ -25,6 +25,7 @@ from cinder import context
 from cinder import db
+from cinder import exception
 from cinder.group import api as group_api
 from cinder import objects
 from cinder import test
 from cinder.tests.unit.api import fakes
 from cinder.tests.unit.api.v2 import fakes as v2_fakes
@@ -38,6 +39,7 @@ from cinder.volume import api as vol_get

 version_header_name = 'OpenStack-API-Version'

 DEFAULT_AZ = "zone1:host1"
+REVERT_TO_SNAPSHOT_VERSION = '3.40'


 @ddt.ddt
@@ -488,3 +490,90 @@ class VolumeApiTest(test.TestCase):
             self.assertIn('provider_id', res_dict['volume'])
         else:
             self.assertNotIn('provider_id', res_dict['volume'])
+
+    def _fake_create_volume(self):
+        vol = {
+            'display_name': 'fake_volume1',
+            'status': 'available'
+        }
+        volume = objects.Volume(context=self.ctxt, **vol)
+        volume.create()
+        return volume
+
+    def _fake_create_snapshot(self, volume_id):
+        snap = {
+            'display_name': 'fake_snapshot1',
+            'status': 'available',
+            'volume_id': volume_id
+        }
+        snapshot = objects.Snapshot(context=self.ctxt, **snap)
+        snapshot.create()
+        return snapshot
+
+    @mock.patch.object(objects.Volume, 'get_latest_snapshot')
+    @mock.patch.object(volume_api.API, 'get_volume')
+    def test_volume_revert_with_snapshot_not_found(self, mock_volume,
+                                                   mock_latest):
+        fake_volume = self._fake_create_volume()
+        mock_volume.return_value = fake_volume
+        mock_latest.side_effect = exception.VolumeSnapshotNotFound(
+            volume_id='fake_id')
+        req = fakes.HTTPRequest.blank('/v3/volumes/fake_id/revert')
+        req.headers = {'OpenStack-API-Version':
+                       'volume %s' % REVERT_TO_SNAPSHOT_VERSION}
+        req.api_version_request = api_version.APIVersionRequest(
+            REVERT_TO_SNAPSHOT_VERSION)
+
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.revert,
+                          req, 'fake_id', {'revert': {'snapshot_id':
+                                                      'fake_snapshot_id'}})
+
+    @mock.patch.object(objects.Volume, 'get_latest_snapshot')
+    @mock.patch.object(volume_api.API, 'get_volume')
+    def test_volume_revert_with_snapshot_not_match(self, mock_volume,
+                                                   mock_latest):
+        fake_volume = self._fake_create_volume()
+        mock_volume.return_value = fake_volume
+        fake_snapshot = self._fake_create_snapshot(fake.UUID1)
+        mock_latest.return_value = fake_snapshot
+        req = fakes.HTTPRequest.blank('/v3/volumes/fake_id/revert')
+        req.headers = {'OpenStack-API-Version':
+                       'volume %s' % REVERT_TO_SNAPSHOT_VERSION}
+        req.api_version_request = api_version.APIVersionRequest(
+            REVERT_TO_SNAPSHOT_VERSION)
+
+        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.revert,
+                          req, 'fake_id', {'revert': {'snapshot_id':
+                                                      'fake_snapshot_id'}})
+
+    @mock.patch.object(objects.Volume, 'get_latest_snapshot')
+    @mock.patch('cinder.objects.base.'
+                'CinderPersistentObject.update_single_status_where')
+    @mock.patch.object(volume_api.API, 'get_volume')
+    def test_volume_revert_update_status_failed(self,
+                                                mock_volume,
+                                                mock_update,
+                                                mock_latest):
+        fake_volume = self._fake_create_volume()
+        fake_snapshot = self._fake_create_snapshot(fake_volume['id'])
+        mock_volume.return_value = fake_volume
+        mock_latest.return_value = fake_snapshot
+        req = fakes.HTTPRequest.blank('/v3/volumes/%s/revert'
+                                      % fake_volume['id'])
+        req.headers = {'OpenStack-API-Version':
+                       'volume %s' % REVERT_TO_SNAPSHOT_VERSION}
+        req.api_version_request = api_version.APIVersionRequest(
+            REVERT_TO_SNAPSHOT_VERSION)
+        # update volume's status failed
+        mock_update.side_effect = [False, True]
+
+        self.assertRaises(webob.exc.HTTPConflict, self.controller.revert,
+                          req, fake_volume['id'], {'revert': {'snapshot_id':
+                                                   fake_snapshot['id']}})
+
+        # update snapshot's status failed
+        mock_update.side_effect = [True, False]
+
+        self.assertRaises(webob.exc.HTTPConflict, self.controller.revert,
+                          req, fake_volume['id'], {'revert': {'snapshot_id':
+                                                   fake_snapshot['id']}})
@@ -49,6 +49,9 @@ class FakeBrickLVM(object):
+    def revert(self, snapshot_name):
+        pass
+
     def deactivate_lv(self, name):
         pass

     def lv_has_snapshot(self, name):
         return False

@@ -13,6 +13,7 @@
 # under the License.

 import datetime
+import ddt
 import uuid

 from iso8601 import iso8601
@@ -181,6 +182,7 @@ class TestCinderComparableObject(test_objects.BaseObjectsTestCase):
         self.assertNotEqual(obj1, None)


+@ddt.ddt
 class TestCinderObjectConditionalUpdate(test.TestCase):

     def setUp(self):
@@ -726,6 +728,24 @@ class TestCinderObjectConditionalUpdate(test.TestCase):
         self.assertTrue(res)
         self.assertTrue(m.called)

+    @ddt.data(('available', 'error', None),
+              ('error', 'rolling_back', [{'fake_filter': 'faked'}]))
+    @ddt.unpack
+    @mock.patch('cinder.objects.base.'
+                'CinderPersistentObject.conditional_update')
+    def test_update_status_where(self, value, expected, filters, mock_update):
+        volume = self._create_volume()
+        if filters:
+            volume.update_single_status_where(value, expected, filters)
+            mock_update.assert_called_with({'status': value},
+                                           {'status': expected},
+                                           filters)
+        else:
+            volume.update_single_status_where(value, expected)
+            mock_update.assert_called_with({'status': value},
+                                           {'status': expected},
+                                           ())
+

 class TestCinderDictObject(test_objects.BaseObjectsTestCase):
     @objects.base.CinderObjectRegistry.register_if(False)
@@ -45,7 +45,7 @@ object_data = {
     'RequestSpec': '1.1-b0bd1a28d191d75648901fa853e8a733',
     'Service': '1.4-a6727ccda6d4043f5e38e75c7c518c7f',
     'ServiceList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
-    'Snapshot': '1.4-b7aa184837ccff570b8443bfd1773017',
+    'Snapshot': '1.5-ac1cdbd5b89588f6a8f44afdf6b8b201',
     'SnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
     'Volume': '1.6-7d3bc8577839d5725670d55e480fe95f',
     'VolumeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
@@ -42,6 +42,7 @@
     "volume:failover_host": "rule:admin_api",
     "volume:freeze_host": "rule:admin_api",
     "volume:thaw_host": "rule:admin_api",
+    "volume:revert_to_snapshot": "",
     "volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
     "volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
     "volume_extension:backup_admin_actions:reset_status": "rule:admin_api",
@@ -23,6 +23,7 @@ from mock import call
 from oslo_config import cfg
 from oslo_utils import timeutils
 from oslo_utils import uuidutils
+import six
 from sqlalchemy.sql import operators

 from cinder.api import common
@@ -1546,6 +1547,41 @@ class DBAPISnapshotTestCase(BaseTest):
         actual = db.snapshot_data_get_for_project(self.ctxt, 'project1')
         self.assertEqual((1, 42), actual)

+    @ddt.data({'time_collection': [1, 2, 3],
+               'latest': 1},
+              {'time_collection': [4, 2, 6],
+               'latest': 2},
+              {'time_collection': [8, 2, 1],
+               'latest': 1})
+    @ddt.unpack
+    def test_snapshot_get_latest_for_volume(self, time_collection, latest):
+        def hours_ago(hour):
+            return timeutils.utcnow() - datetime.timedelta(
+                hours=hour)
+        db.volume_create(self.ctxt, {'id': 1})
+        for snapshot in time_collection:
+            db.snapshot_create(self.ctxt,
+                               {'id': snapshot, 'volume_id': 1,
+                                'display_name': 'one',
+                                'created_at': hours_ago(snapshot),
+                                'status': fields.SnapshotStatus.AVAILABLE})
+
+        snapshot = db.snapshot_get_latest_for_volume(self.ctxt, 1)
+
+        self.assertEqual(six.text_type(latest), snapshot['id'])
+
+    def test_snapshot_get_latest_for_volume_not_found(self):
+
+        db.volume_create(self.ctxt, {'id': 1})
+        for t_id in [2, 3]:
+            db.snapshot_create(self.ctxt,
+                               {'id': t_id, 'volume_id': t_id,
+                                'display_name': 'one',
+                                'status': fields.SnapshotStatus.AVAILABLE})
+
+        self.assertRaises(exception.VolumeSnapshotNotFound,
+                          db.snapshot_get_latest_for_volume, self.ctxt, 1)
+
     def test_snapshot_get_all_by_filter(self):
         db.volume_create(self.ctxt, {'id': 1})
         db.volume_create(self.ctxt, {'id': 2})
@@ -150,6 +150,7 @@ def create_snapshot(ctxt,
     snap.user_id = ctxt.user_id or fake.USER_ID
     snap.project_id = ctxt.project_id or fake.PROJECT_ID
     snap.status = status
+    snap.metadata = {}
     snap.volume_size = vol['size']
     snap.display_name = display_name
     snap.display_description = display_description
@@ -687,6 +687,32 @@ class LVMVolumeDriverTestCase(test_driver.BaseDriverTestCase):
                           self.volume.driver.manage_existing_snapshot_get_size,
                           snp, ref)

+    def test_revert_snapshot(self):
+        self._setup_stubs_for_manage_existing()
+        fake_volume = tests_utils.create_volume(self.context,
+                                                display_name='fake_volume')
+        fake_snapshot = tests_utils.create_snapshot(
+            self.context, fake_volume.id)
+
+        with mock.patch.object(self.volume.driver.vg,
+                               'revert') as mock_revert,\
+                mock.patch.object(self.volume.driver.vg,
+                                  'create_lv_snapshot') as mock_create,\
+                mock.patch.object(self.volume.driver.vg,
+                                  'deactivate_lv') as mock_deactive,\
+                mock.patch.object(self.volume.driver.vg,
+                                  'activate_lv') as mock_active:
+            self.volume.driver.revert_to_snapshot(self.context,
+                                                  fake_volume,
+                                                  fake_snapshot)
+            mock_revert.assert_called_once_with(
+                self.volume.driver._escape_snapshot(fake_snapshot.name))
+            mock_deactive.assert_called_once_with(fake_volume.name)
+            mock_active.assert_called_once_with(fake_volume.name)
+            mock_create.assert_called_once_with(
+                self.volume.driver._escape_snapshot(fake_snapshot.name),
+                fake_volume.name, self.configuration.lvm_type)
+
     def test_lvm_manage_existing_snapshot_bad_size(self):
         """Make sure correct exception on bad size returned from LVM.

@@ -1758,6 +1758,142 @@ class VolumeTestCase(base.BaseVolumeTestCase):

         db.volume_destroy(self.context, volume.id)

+    def test__revert_to_snapshot_generic_failed(self):
+        fake_volume = tests_utils.create_volume(self.context,
+                                                status='available')
+        fake_snapshot = tests_utils.create_snapshot(self.context,
+                                                    fake_volume.id)
+        with mock.patch.object(
+                self.volume.driver,
+                '_create_temp_volume_from_snapshot') as mock_temp, \
+                mock.patch.object(
+                    self.volume.driver,
+                    'delete_volume') as mock_driver_delete, \
+                mock.patch.object(
+                    self.volume, '_copy_volume_data') as mock_copy:
+            temp_volume = tests_utils.create_volume(self.context,
+                                                    status='available')
+            mock_copy.side_effect = [exception.VolumeDriverException('error')]
+            mock_temp.return_value = temp_volume
+
+            self.assertRaises(exception.VolumeDriverException,
+                              self.volume._revert_to_snapshot_generic,
+                              self.context, fake_volume, fake_snapshot)
+
+            mock_copy.assert_called_once_with(
+                self.context, temp_volume, fake_volume)
+            mock_driver_delete.assert_called_once_with(temp_volume)
+
+    def test__revert_to_snapshot_generic(self):
+        fake_volume = tests_utils.create_volume(self.context,
+                                                status='available')
+        fake_snapshot = tests_utils.create_snapshot(self.context,
+                                                    fake_volume.id)
+        with mock.patch.object(
+                self.volume.driver,
+                '_create_temp_volume_from_snapshot') as mock_temp,\
+                mock.patch.object(
+                    self.volume.driver, 'delete_volume') as mock_driver_delete,\
+                mock.patch.object(
+                    self.volume, '_copy_volume_data') as mock_copy:
+            temp_volume = tests_utils.create_volume(self.context,
+                                                    status='available')
+            mock_temp.return_value = temp_volume
+            self.volume._revert_to_snapshot_generic(
+                self.context, fake_volume, fake_snapshot)
+            mock_copy.assert_called_once_with(
+                self.context, temp_volume, fake_volume)
+            mock_driver_delete.assert_called_once_with(temp_volume)
+
+    @ddt.data({'driver_error': True},
+              {'driver_error': False})
+    @ddt.unpack
+    def test__revert_to_snapshot(self, driver_error):
+        mock.patch.object(self.volume, '_notify_about_snapshot_usage')
+        with mock.patch.object(self.volume.driver,
+                               'revert_to_snapshot') as driver_revert, \
+                mock.patch.object(self.volume, '_notify_about_volume_usage'), \
+                mock.patch.object(self.volume, '_notify_about_snapshot_usage'),\
+                mock.patch.object(self.volume,
+                                  '_revert_to_snapshot_generic') as generic_revert:
+            if driver_error:
+                driver_revert.side_effect = [NotImplementedError]
+            else:
+                driver_revert.return_value = None
+
+            self.volume._revert_to_snapshot(self.context, {}, {})
+
+            driver_revert.assert_called_once_with(self.context, {}, {})
+            if driver_error:
+                generic_revert.assert_called_once_with(self.context, {}, {})
+
+    @ddt.data(True, False)
+    def test_revert_to_snapshot(self, has_snapshot):
+        fake_volume = tests_utils.create_volume(self.context,
+                                                status='reverting',
+                                                project_id='123',
+                                                size=2)
+        fake_snapshot = tests_utils.create_snapshot(self.context,
+                                                    fake_volume['id'],
+                                                    status='restoring',
+                                                    volume_size=1)
+        with mock.patch.object(self.volume,
+                               '_revert_to_snapshot') as _revert,\
+                mock.patch.object(self.volume,
+                                  '_create_backup_snapshot') as _create_snapshot,\
+                mock.patch.object(self.volume,
+                                  'delete_snapshot') as _delete_snapshot:
+            _revert.return_value = None
+            if has_snapshot:
+                _create_snapshot.return_value = {'id': 'fake_snapshot'}
+            else:
+                _create_snapshot.return_value = None
+            self.volume.revert_to_snapshot(self.context, fake_volume,
+                                           fake_snapshot)
+            _revert.assert_called_once_with(self.context, fake_volume,
+                                            fake_snapshot)
+            _create_snapshot.assert_called_once_with(self.context, fake_volume)
+            if has_snapshot:
+                _delete_snapshot.assert_called_once_with(
+                    self.context, {'id': 'fake_snapshot'}, handle_quota=False)
+            else:
+                _delete_snapshot.assert_not_called()
+            fake_volume.refresh()
+            fake_snapshot.refresh()
+            self.assertEqual('available', fake_volume['status'])
+            self.assertEqual('available', fake_snapshot['status'])
+            self.assertEqual(2, fake_volume['size'])
+
+    def test_revert_to_snapshot_failed(self):
+        fake_volume = tests_utils.create_volume(self.context,
+                                                status='reverting',
+                                                project_id='123',
+                                                size=2)
+        fake_snapshot = tests_utils.create_snapshot(self.context,
+                                                    fake_volume['id'],
+                                                    status='restoring',
+                                                    volume_size=1)
+        with mock.patch.object(self.volume,
+                               '_revert_to_snapshot') as _revert, \
+                mock.patch.object(self.volume,
+                                  '_create_backup_snapshot'), \
+                mock.patch.object(self.volume,
+                                  'delete_snapshot') as _delete_snapshot:
+            _revert.side_effect = [exception.VolumeDriverException(
+                message='fake_message')]
+            self.assertRaises(exception.VolumeDriverException,
+                              self.volume.revert_to_snapshot,
+                              self.context, fake_volume,
+                              fake_snapshot)
+            _revert.assert_called_once_with(self.context, fake_volume,
+                                            fake_snapshot)
+            _delete_snapshot.assert_not_called()
+            fake_volume.refresh()
+            fake_snapshot.refresh()
+            self.assertEqual('error', fake_volume['status'])
+            self.assertEqual('available', fake_snapshot['status'])
+            self.assertEqual(2, fake_volume['size'])
+
     def test_cannot_delete_volume_with_snapshots(self):
         """Test volume can't be deleted with dependent snapshots."""
         volume = tests_utils.create_volume(self.context, **self.volume_params)
@@ -353,6 +353,26 @@ class API(base.Base):
                          resource=vref)
         return vref

+    @wrap_check_policy
+    def revert_to_snapshot(self, context, volume, snapshot):
+        """Revert a volume to a snapshot."""
+
+        v_res = volume.update_single_status_where(
+            'reverting', 'available')
+        if not v_res:
+            msg = _("Can't revert volume %s to its latest snapshot. "
+                    "Volume's status must be 'available'.") % volume.id
+            raise exception.InvalidVolume(reason=msg)
+        s_res = snapshot.update_single_status_where(
+            fields.SnapshotStatus.RESTORING,
+            fields.SnapshotStatus.AVAILABLE)
+        if not s_res:
+            msg = _("Can't revert volume %s to its latest snapshot. "
+                    "Snapshot's status must be 'available'.") % snapshot.id
+            raise exception.InvalidSnapshot(reason=msg)
+
+        self.volume_rpcapi.revert_to_snapshot(context, volume, snapshot)
+
     @wrap_check_policy
     def delete(self, context, volume,
                force=False,
@@ -1199,7 +1199,7 @@ class BaseVD(object):
         temp_snap_ref.save()
         return temp_snap_ref

-    def _create_temp_volume(self, context, volume):
+    def _create_temp_volume(self, context, volume, volume_options=None):
         kwargs = {
             'size': volume.size,
             'display_name': 'backup-vol-%s' % volume.id,
@@ -1212,6 +1212,7 @@ class BaseVD(object):
             'availability_zone': volume.availability_zone,
             'volume_type_id': volume.volume_type_id,
         }
+        kwargs.update(volume_options or {})
         temp_vol_ref = objects.Volume(context=context, **kwargs)
         temp_vol_ref.create()
         return temp_vol_ref
@@ -1230,8 +1231,10 @@ class BaseVD(object):
         temp_vol_ref.save()
         return temp_vol_ref

-    def _create_temp_volume_from_snapshot(self, context, volume, snapshot):
-        temp_vol_ref = self._create_temp_volume(context, volume)
+    def _create_temp_volume_from_snapshot(self, context, volume, snapshot,
+                                          volume_options=None):
+        temp_vol_ref = self._create_temp_volume(context, volume,
+                                                volume_options=volume_options)
         try:
             model_update = self.create_volume_from_snapshot(temp_vol_ref,
                                                             snapshot)
@@ -2099,6 +2102,17 @@ class VolumeDriver(ManageableVD, CloneableImageVD, ManageableSnapshotsVD,
         msg = _("Manage existing volume not implemented.")
         raise NotImplementedError(msg)

+    def revert_to_snapshot(self, context, volume, snapshot):
+        """Revert volume to snapshot.
+
+        Note: the revert process should not change the volume's
+        current size, that means if the driver shrank
+        the volume during the process, it should extend the
+        volume internally.
+        """
+        msg = _("Revert volume to snapshot not implemented.")
+        raise NotImplementedError(msg)
+
     def manage_existing_get_size(self, volume, existing_ref):
         msg = _("Manage existing volume not implemented.")
         raise NotImplementedError(msg)
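Drivers that can roll back natively simply override the new ``revert_to_snapshot`` hook; anything else falls through to the generic copy path in the manager. A hypothetical driver sketch, where the backend client and its ``rollback`` call are invented for illustration:

    from cinder.volume import driver

    class ExampleArrayDriver(driver.VolumeDriver):
        """Illustrative only; not part of the patch."""

        def revert_to_snapshot(self, context, volume, snapshot):
            # Hypothetical one-call rollback on the backend array.
            self._client.rollback(volume.provider_id, snapshot.provider_id)
            # This imagined backend never resizes the LUN during rollback;
            # if it did, the driver would have to restore the volume's
            # original size itself, per the base-class docstring.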
@@ -459,6 +459,15 @@ class LVMVolumeDriver(driver.VolumeDriver):
         # it's quite slow.
         self._delete_volume(snapshot, is_snapshot=True)

+    def revert_to_snapshot(self, context, volume, snapshot):
+        """Revert a volume to a snapshot"""
+
+        self.vg.revert(self._escape_snapshot(snapshot.name))
+        self.vg.deactivate_lv(volume.name)
+        self.vg.activate_lv(volume.name)
+        # Recreate the snapshot that was destroyed by the revert
+        self.create_snapshot(snapshot)
+
     def local_path(self, volume, vg=None):
         if vg is None:
             vg = self.configuration.volume_group
@@ -870,7 +870,155 @@ class VolumeManager(manager.CleanableManager,
             volume_ref.status = status
             volume_ref.save()

     @objects.Snapshot.set_workers
+    def _revert_to_snapshot_generic(self, ctxt, volume, snapshot):
+        """Generic way to revert volume to a snapshot.
+
+        the framework will use the generic way to implement the revert
+        to snapshot feature:
+        1. create a temporary volume from snapshot
+        2. mount two volumes to host
+        3. copy data from temporary volume to original volume
+        4. detach and destroy temporary volume
+        """
+        temp_vol = None
+
+        try:
+            v_options = {'display_name': '[revert] temporary volume created '
+                                         'from snapshot %s' % snapshot.id}
+            ctxt = context.get_internal_tenant_context() or ctxt
+            temp_vol = self.driver._create_temp_volume_from_snapshot(
+                ctxt, volume, snapshot, volume_options=v_options)
+            self._copy_volume_data(ctxt, temp_vol, volume)
+            self.driver.delete_volume(temp_vol)
+            temp_vol.destroy()
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                LOG.exception(
+                    "Failed to use snapshot %(snapshot)s to create "
+                    "a temporary volume and copy data to volume "
+                    " %(volume)s.",
+                    {'snapshot': snapshot.id,
+                     'volume': volume.id})
+                if temp_vol and temp_vol.status == 'available':
+                    self.driver.delete_volume(temp_vol)
+                    temp_vol.destroy()
+
+    def _revert_to_snapshot(self, context, volume, snapshot):
+        """Use driver or generic method to rollback volume."""
+
+        self._notify_about_volume_usage(context, volume, "revert.start")
+        self._notify_about_snapshot_usage(context, snapshot, "revert.start")
+        try:
+            self.driver.revert_to_snapshot(context, volume, snapshot)
+        except (NotImplementedError, AttributeError):
+            LOG.info("Driver's 'revert_to_snapshot' is not found. "
+                     "Try to use copy-snapshot-to-volume method.")
+            self._revert_to_snapshot_generic(context, volume, snapshot)
+        self._notify_about_volume_usage(context, volume, "revert.end")
+        self._notify_about_snapshot_usage(context, snapshot, "revert.end")
+
+    def _create_backup_snapshot(self, context, volume):
+        kwargs = {
+            'volume_id': volume.id,
+            'user_id': context.user_id,
+            'project_id': context.project_id,
+            'status': fields.SnapshotStatus.CREATING,
+            'progress': '0%',
+            'volume_size': volume.size,
+            'display_name': '[revert] volume %s backup snapshot' % volume.id,
+            'display_description': 'This is only used for backup when '
+                                   'reverting. If the reverting process '
+                                   'failed, you can restore you data by '
+                                   'creating new volume with this snapshot.',
+            'volume_type_id': volume.volume_type_id,
+            'encryption_key_id': volume.encryption_key_id,
+            'metadata': {}
+        }
+        snapshot = objects.Snapshot(context=context, **kwargs)
+        snapshot.create()
+        self.create_snapshot(context, snapshot)
+        return snapshot
+
+    def revert_to_snapshot(self, context, volume, snapshot):
+        """Revert a volume to a snapshot.
+
+        The process of reverting to snapshot consists of several steps:
+        1. create a snapshot for backup (in case of data loss)
+        2.1. use driver's specific logic to revert volume
+        2.2. try the generic way to revert volume if driver's method is missing
+        3. delete the backup snapshot
+        """
+        backup_snapshot = None
+        try:
+            LOG.info("Start to perform revert to snapshot process.")
+            # Create a snapshot which can be used to restore the volume
+            # data by hand if revert process failed.
+            backup_snapshot = self._create_backup_snapshot(context, volume)
+            self._revert_to_snapshot(context, volume, snapshot)
+        except Exception as error:
+            with excutils.save_and_reraise_exception():
+                self._notify_about_volume_usage(context, volume,
+                                                "revert.end")
+                self._notify_about_snapshot_usage(context, snapshot,
+                                                  "revert.end")
+                msg = ('Volume %(v_id)s revert to '
+                       'snapshot %(s_id)s failed with %(error)s.')
+                msg_args = {'v_id': volume.id,
+                            's_id': snapshot.id,
+                            'error': six.text_type(error)}
+                v_res = volume.update_single_status_where(
+                    'error',
+                    'reverting')
+                if not v_res:
+                    msg_args = {"id": volume.id,
+                                "status": 'error'}
+                    msg += ("Failed to reset volume %(id)s "
+                            "status to %(status)s.") % msg_args
+
+                s_res = snapshot.update_single_status_where(
+                    fields.SnapshotStatus.AVAILABLE,
+                    fields.SnapshotStatus.RESTORING)
+                if not s_res:
+                    msg_args = {"id": snapshot.id,
+                                "status":
+                                    fields.SnapshotStatus.ERROR}
+                    msg += ("Failed to reset snapshot %(id)s "
+                            "status to %(status)s." % msg_args)
+                LOG.exception(msg, msg_args)
+
+        self._notify_about_volume_usage(context, volume,
+                                        "revert.end")
+        self._notify_about_snapshot_usage(context, snapshot,
+                                          "revert.end")
+        v_res = volume.update_single_status_where(
+            'available', 'reverting')
+        if not v_res:
+            msg_args = {"id": volume.id,
+                        "status": 'available'}
+            msg = _("Revert finished, but failed to reset "
+                    "volume %(id)s status to %(status)s, "
+                    "please manually reset it.") % msg_args
+            raise exception.BadResetResourceStatus(message=msg)
+
+        s_res = snapshot.update_single_status_where(
+            fields.SnapshotStatus.AVAILABLE,
+            fields.SnapshotStatus.RESTORING)
+        if not s_res:
+            msg_args = {"id": snapshot.id,
+                        "status":
+                            fields.SnapshotStatus.AVAILABLE}
+            msg = _("Revert finished, but failed to reset "
+                    "snapshot %(id)s status to %(status)s, "
+                    "please manually reset it.") % msg_args
+            raise exception.BadResetResourceStatus(message=msg)
+        if backup_snapshot:
+            self.delete_snapshot(context,
+                                 backup_snapshot, handle_quota=False)
+        msg = ('Volume %(v_id)s reverted to snapshot %(snap_id)s '
+               'successfully.')
+        msg_args = {'v_id': volume.id, 'snap_id': snapshot.id}
+        LOG.info(msg, msg_args)
+
     def create_snapshot(self, context, snapshot):
         """Creates and exports the snapshot."""
         context = context.elevated()
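Putting the manager pieces together, the status choreography is easiest to read as a summary; a sketch of the transitions the conditional updates above enforce (commentary, not code from the patch):

    # Happy path:
    #   volume:   'available' -> 'reverting' -> 'available'
    #   snapshot: 'available' -> 'restoring' -> 'available'
    # Failure path:
    #   volume:   'reverting' -> 'error'
    #   snapshot: 'restoring' -> 'available'
    # A backup snapshot is created before the revert and deleted afterwards
    # with handle_quota=False, presumably because the manager created it
    # without going through the quota reservation path, so there is nothing
    # to release on delete.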
@@ -928,7 +1076,8 @@ class VolumeManager(manager.CleanableManager,
         return snapshot.id

     @coordination.synchronized('{snapshot.id}-{f_name}')
-    def delete_snapshot(self, context, snapshot, unmanage_only=False):
+    def delete_snapshot(self, context, snapshot,
+                        unmanage_only=False, handle_quota=True):
         """Deletes and unexports snapshot."""
         context = context.elevated()
         snapshot._context = context
@@ -964,21 +1113,23 @@ class VolumeManager(manager.CleanableManager,
         snapshot.save()

         # Get reservations
+        reservations = None
         try:
-            if CONF.no_snapshot_gb_quota:
-                reserve_opts = {'snapshots': -1}
-            else:
-                reserve_opts = {
-                    'snapshots': -1,
-                    'gigabytes': -snapshot.volume_size,
-                }
-            volume_ref = self.db.volume_get(context, snapshot.volume_id)
-            QUOTAS.add_volume_type_opts(context,
-                                        reserve_opts,
-                                        volume_ref.get('volume_type_id'))
-            reservations = QUOTAS.reserve(context,
-                                          project_id=project_id,
-                                          **reserve_opts)
+            if handle_quota:
+                if CONF.no_snapshot_gb_quota:
+                    reserve_opts = {'snapshots': -1}
+                else:
+                    reserve_opts = {
+                        'snapshots': -1,
+                        'gigabytes': -snapshot.volume_size,
+                    }
+                volume_ref = self.db.volume_get(context, snapshot.volume_id)
+                QUOTAS.add_volume_type_opts(context,
+                                            reserve_opts,
+                                            volume_ref.get('volume_type_id'))
+                reservations = QUOTAS.reserve(context,
+                                              project_id=project_id,
+                                              **reserve_opts)
         except Exception:
             reservations = None
             LOG.exception("Update snapshot usages failed.",
@@ -132,9 +132,10 @@ class VolumeAPI(rpc.RPCAPI):
                terminate_connection_snapshot, and remove_export_snapshot.
         3.14 - Adds enable_replication, disable_replication,
                failover_replication, and list_replication_targets.
+        3.15 - Add revert_to_snapshot method
     """

-    RPC_API_VERSION = '3.14'
+    RPC_API_VERSION = '3.15'
     RPC_DEFAULT_VERSION = '3.0'
     TOPIC = constants.VOLUME_TOPIC
     BINARY = 'cinder-volume'
@@ -165,6 +166,13 @@ class VolumeAPI(rpc.RPCAPI):
                    allow_reschedule=allow_reschedule,
                    volume=volume)

+    @rpc.assert_min_rpc_version('3.15')
+    def revert_to_snapshot(self, ctxt, volume, snapshot):
+        version = self._compat_ver('3.15')
+        cctxt = self._get_cctxt(volume.host, version)
+        cctxt.cast(ctxt, 'revert_to_snapshot', volume=volume,
+                   snapshot=snapshot)
+
     def delete_volume(self, ctxt, volume, unmanage_only=False, cascade=False):
         volume.create_worker()
         cctxt = self._get_cctxt(volume.service_topic_queue)
@@ -28,6 +28,7 @@
     "volume:update_readonly_flag": "rule:admin_or_owner",
     "volume:retype": "rule:admin_or_owner",
     "volume:update": "rule:admin_or_owner",
+    "volume:revert_to_snapshot": "rule:admin_or_owner",

     "volume_extension:types_manage": "rule:admin_api",
     "volume_extension:types_extra_specs": "rule:admin_api",
@@ -0,0 +1,3 @@
+---
+features:
+  - Add revert to snapshot API and support in LVM driver.