Backup snapshots

Today we can back up a volume, but not a snapshot.
This patch adds support for backing up snapshots,
providing another layer of data protection for the
user.

DocImpact
implements blueprint backup-snapshots

Change-Id: Ib4ab9ca9dc72b30151154f3f96037f9ce3c9c540
Author: Xing Yang
Date:   2015-10-09 21:57:18 -04:00
Commit: dbc345729e
Parent: d7f1cde362

17 changed files with 301 additions and 61 deletions


@@ -263,6 +263,7 @@ class BackupsController(wsgi.Controller):
         description = backup.get('description', None)
         incremental = backup.get('incremental', False)
         force = backup.get('force', False)
+        snapshot_id = backup.get('snapshot_id', None)
         LOG.info(_LI("Creating backup of volume %(volume_id)s in container"
                      " %(container)s"),
                  {'volume_id': volume_id, 'container': container},
@@ -271,7 +272,8 @@ class BackupsController(wsgi.Controller):
         try:
             new_backup = self.backup_api.create(context, name, description,
                                                 volume_id, container,
-                                                incremental, None, force)
+                                                incremental, None, force,
+                                                snapshot_id)
         except exception.InvalidVolume as error:
             raise exc.HTTPBadRequest(explanation=error.msg)
         except exception.VolumeNotFound as error:
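
As a usage sketch only (not part of this patch; the endpoint, token, and IDs below are placeholders, and python-requests is assumed), the controller change above means a backup-create request body can now carry a snapshot_id:

# --- illustrative example, not part of the patch ---
import json

import requests

body = {"backup": {"name": "snap-backup-001",
                   "description": "backup taken from a snapshot",
                   "volume_id": "VOLUME_UUID",
                   "snapshot_id": "SNAPSHOT_UUID",   # new optional field
                   "container": "volumebackups",
                   "incremental": False}}
resp = requests.post("http://cinder.example.com:8776/v2/PROJECT_ID/backups",
                     headers={"X-Auth-Token": "TOKEN",
                              "Content-Type": "application/json"},
                     data=json.dumps(body))
print(resp.status_code, resp.json()["backup"]["id"])
# --- end of illustrative example ---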


@@ -78,6 +78,8 @@ class ViewBuilder(common.ViewBuilder):
                 'links': self._get_links(request, backup['id']),
                 'is_incremental': backup.is_incremental,
                 'has_dependent_backups': backup.has_dependent_backups,
+                'snapshot_id': backup.snapshot_id,
+                'data_timestamp': backup.data_timestamp,
             }
         }
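
For reference, a backup detail response rendered by this view now carries the two new keys; a trimmed, made-up example (all values are placeholders, only 'snapshot_id' and 'data_timestamp' are the point here):

# --- illustrative example, not part of the patch ---
backup_detail = {
    "backup": {
        "id": "BACKUP_UUID",
        "volume_id": "VOLUME_UUID",
        "status": "available",
        "is_incremental": False,
        "has_dependent_backups": False,
        "snapshot_id": "SNAPSHOT_UUID",            # None for volume backups
        "data_timestamp": "2015-10-09T08:10:00.000000",
    }
}
# --- end of illustrative example ---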


@@ -19,11 +19,14 @@
 Handles all requests relating to the volume backups service.
 """
 
+from datetime import datetime
+
 from eventlet import greenthread
 from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_utils import excutils
 from oslo_utils import strutils
+from pytz import timezone
 
 from cinder.backup import rpcapi as backup_rpcapi
 from cinder import context
@@ -150,20 +153,28 @@ class API(base.Base):
 
     def create(self, context, name, description, volume_id,
                container, incremental=False, availability_zone=None,
-               force=False):
+               force=False, snapshot_id=None):
         """Make the RPC call to create a volume backup."""
         check_policy(context, 'create')
         volume = self.volume_api.get(context, volume_id)
+        snapshot = None
+        if snapshot_id:
+            snapshot = self.volume_api.get_snapshot(context, snapshot_id)
         if volume['status'] not in ["available", "in-use"]:
             msg = (_('Volume to be backed up must be available '
                      'or in-use, but the current status is "%s".')
                    % volume['status'])
             raise exception.InvalidVolume(reason=msg)
-        elif volume['status'] in ["in-use"] and not force:
+        elif volume['status'] in ["in-use"] and not snapshot_id and not force:
             msg = _('Backing up an in-use volume must use '
                     'the force flag.')
             raise exception.InvalidVolume(reason=msg)
+        elif snapshot_id and snapshot['status'] not in ["available"]:
+            msg = (_('Snapshot to be backed up must be available, '
+                     'but the current status is "%s".')
+                   % snapshot['status'])
+            raise exception.InvalidSnapshot(reason=msg)
 
         previous_status = volume['status']
         volume_host = volume_utils.extract_host(volume['host'], 'host')
@@ -208,15 +219,36 @@ class API(base.Base):
                 raise exception.BackupLimitExceeded(
                     allowed=quotas[over])
 
-        # Find the latest backup of the volume and use it as the parent
-        # backup to do an incremental backup.
+        # Find the latest backup and use it as the parent backup to do an
+        # incremental backup.
         latest_backup = None
         if incremental:
             backups = objects.BackupList.get_all_by_volume(context.elevated(),
                                                            volume_id)
             if backups.objects:
-                latest_backup = max(backups.objects,
-                                    key=lambda x: x['created_at'])
+                # NOTE(xyang): The 'data_timestamp' field records the time
+                # when the data on the volume was first saved. If it is
+                # a backup from volume, 'data_timestamp' will be the same
+                # as 'created_at' for a backup. If it is a backup from a
+                # snapshot, 'data_timestamp' will be the same as
+                # 'created_at' for a snapshot.
+                # If not backing up from snapshot, the backup with the latest
+                # 'data_timestamp' will be the parent; If backing up from
+                # snapshot, the backup with the latest 'data_timestamp' will
+                # be chosen only if 'data_timestamp' is earlier than the
+                # 'created_at' timestamp of the snapshot; Otherwise, the
+                # backup will not be chosen as the parent.
+                # For example, a volume has a backup taken at 8:00, then
+                # a snapshot taken at 8:10, and then a backup at 8:20.
+                # When taking an incremental backup of the snapshot, the
+                # parent should be the backup at 8:00, not 8:20, and the
+                # 'data_timestamp' of this new backup will be 8:10.
+                latest_backup = max(
+                    backups.objects,
+                    key=lambda x: x['data_timestamp']
+                    if (not snapshot or (snapshot and x['data_timestamp']
+                                         < snapshot['created_at']))
+                    else datetime(1, 1, 1, 1, 1, 1, tzinfo=timezone('UTC')))
             else:
                 msg = _('No backups available to do an incremental backup.')
                 raise exception.InvalidBackup(reason=msg)
@@ -229,6 +261,11 @@ class API(base.Base):
                          'incremental backup.')
                 raise exception.InvalidBackup(reason=msg)
 
+        data_timestamp = None
+        if snapshot_id:
+            snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
+            data_timestamp = snapshot.created_at
+
         self.db.volume_update(context, volume_id,
                               {'status': 'backing-up',
                                'previous_status': previous_status})
@@ -244,9 +281,14 @@ class API(base.Base):
                 'parent_id': parent_id,
                 'size': volume['size'],
                 'host': volume_host,
+                'snapshot_id': snapshot_id,
+                'data_timestamp': data_timestamp,
             }
             backup = objects.Backup(context=context, **kwargs)
             backup.create()
+            if not snapshot_id:
+                backup.data_timestamp = backup.created_at
+                backup.save()
             QUOTAS.commit(context, reservations)
         except Exception:
             with excutils.save_and_reraise_exception():
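
To make the parent-selection rule in the NOTE above concrete, here is a small self-contained sketch (illustrative only: plain dicts instead of Cinder objects, invented timestamps) that reuses the same key function:

# --- illustrative example, not part of the patch ---
from datetime import datetime

from pytz import timezone

UTC = timezone('UTC')
EPOCH = datetime(1, 1, 1, 1, 1, 1, tzinfo=UTC)

# Existing backups of the volume; data_timestamp == created_at for
# backups taken directly from the volume.
backups = [
    {'id': 'b1', 'data_timestamp': datetime(2015, 10, 9, 8, 0, tzinfo=UTC)},
    {'id': 'b2', 'data_timestamp': datetime(2015, 10, 9, 8, 20, tzinfo=UTC)},
]
# Snapshot taken between the two backups.
snapshot = {'created_at': datetime(2015, 10, 9, 8, 10, tzinfo=UTC)}

# Same key function as in create(): backups whose data is newer than the
# snapshot are pushed down to EPOCH so they can never win the max().
parent = max(
    backups,
    key=lambda x: x['data_timestamp']
    if (not snapshot or (snapshot and x['data_timestamp']
                         < snapshot['created_at']))
    else EPOCH)

print(parent['id'])  # -> 'b1', the 8:00 backup, not the 8:20 one
# --- end of illustrative example ---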


@@ -0,0 +1,40 @@
+# Copyright (c) 2015 EMC Corporation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, DateTime, MetaData, String, Table
+
+
+def upgrade(migrate_engine):
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    backups = Table('backups', meta, autoload=True)
+    snapshot_id = Column('snapshot_id', String(length=36))
+    data_timestamp = Column('data_timestamp', DateTime)
+
+    backups.create_column(snapshot_id)
+    backups.update().values(snapshot_id=None).execute()
+
+    backups.create_column(data_timestamp)
+    backups.update().values(data_timestamp=None).execute()
+
+    # Copy existing created_at timestamp to data_timestamp
+    # in the backups table.
+    backups_list = list(backups.select().execute())
+    for backup in backups_list:
+        backup_id = backup.id
+        backups.update().\
+            where(backups.c.id == backup_id).\
+            values(data_timestamp=backup.created_at).execute()
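
For what it's worth, the row-by-row backfill above could also be expressed as one set-based UPDATE; a minimal SQLAlchemy sketch under the same bound-metadata assumption (not part of the patch):

# --- illustrative example, not part of the patch ---
def backfill_data_timestamp(backups):
    # Copy created_at into data_timestamp for every existing backup row
    # with a single UPDATE statement, equivalent to the loop above.
    backups.update().values(
        data_timestamp=backups.c.created_at).execute()
# --- end of illustrative example ---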


@@ -541,6 +541,8 @@ class Backup(BASE, CinderBase):
     temp_volume_id = Column(String(36))
     temp_snapshot_id = Column(String(36))
     num_dependent_backups = Column(Integer)
+    snapshot_id = Column(String(36))
+    data_timestamp = Column(DateTime)
 
     @validates('fail_reason')
     def validate_fail_reason(self, key, fail_reason):


@@ -38,7 +38,8 @@ class Backup(base.CinderPersistentObject, base.CinderObject,
     # Version 1.0: Initial version
     # Version 1.1: Add new field num_dependent_backups and extra fields
     #              is_incremental and has_dependent_backups.
-    VERSION = '1.1'
+    # Version 1.2: Add new field snapshot_id and data_timestamp.
+    VERSION = '1.2'
 
     fields = {
         'id': fields.UUIDField(),
@@ -68,6 +69,8 @@ class Backup(base.CinderPersistentObject, base.CinderObject,
         'temp_volume_id': fields.StringField(nullable=True),
         'temp_snapshot_id': fields.StringField(nullable=True),
         'num_dependent_backups': fields.IntegerField(),
+        'snapshot_id': fields.StringField(nullable=True),
+        'data_timestamp': fields.DateTimeField(nullable=True),
     }
 
     obj_extra_fields = ['name', 'is_incremental', 'has_dependent_backups']


@@ -20,6 +20,7 @@ Tests for Backup code.
 import json
 from xml.dom import minidom
 
+import ddt
 import mock
 from oslo_utils import timeutils
 import webob
@@ -37,7 +38,10 @@ from cinder.tests.unit import utils
 # needed for stubs to work
 import cinder.volume
 
+NUM_ELEMENTS_IN_BACKUP = 17
+
 
+@ddt.ddt
 class BackupsAPITestCase(test.TestCase):
     """Test Case for backups API."""
@@ -55,11 +59,12 @@ class BackupsAPITestCase(test.TestCase):
                        display_description='this is a test backup',
                        container='volumebackups',
                        status='creating',
-                       snapshot=False,
                        incremental=False,
                        parent_id=None,
                        size=0, object_count=0, host='testhost',
-                       num_dependent_backups=0):
+                       num_dependent_backups=0,
+                       snapshot_id=None,
+                       data_timestamp=None):
         """Create a backup object."""
         backup = {}
         backup['volume_id'] = volume_id
@@ -74,21 +79,35 @@ class BackupsAPITestCase(test.TestCase):
         backup['fail_reason'] = ''
         backup['size'] = size
         backup['object_count'] = object_count
-        backup['snapshot'] = snapshot
         backup['incremental'] = incremental
         backup['parent_id'] = parent_id
         backup['num_dependent_backups'] = num_dependent_backups
-        return db.backup_create(context.get_admin_context(), backup)['id']
+        backup['snapshot_id'] = snapshot_id
+        backup['data_timestamp'] = data_timestamp
+        backup = db.backup_create(context.get_admin_context(), backup)
+        if not snapshot_id:
+            db.backup_update(context.get_admin_context(),
+                             backup['id'],
+                             {'data_timestamp': backup['created_at']})
+        return backup['id']
 
     @staticmethod
     def _get_backup_attrib(backup_id, attrib_name):
         return db.backup_get(context.get_admin_context(),
                              backup_id)[attrib_name]
 
-    def test_show_backup(self):
+    @ddt.data(False, True)
+    def test_show_backup(self, backup_from_snapshot):
         volume_id = utils.create_volume(self.context, size=5,
                                         status='creating')['id']
-        backup_id = self._create_backup(volume_id)
+        snapshot = None
+        snapshot_id = None
+        if backup_from_snapshot:
+            snapshot = utils.create_snapshot(self.context,
+                                             volume_id)
+            snapshot_id = snapshot.id
+        backup_id = self._create_backup(volume_id,
+                                        snapshot_id=snapshot_id)
         req = webob.Request.blank('/v2/fake/backups/%s' %
                                   backup_id)
         req.method = 'GET'
@@ -109,8 +128,11 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(volume_id, res_dict['backup']['volume_id'])
         self.assertFalse(res_dict['backup']['is_incremental'])
         self.assertFalse(res_dict['backup']['has_dependent_backups'])
+        self.assertEqual(snapshot_id, res_dict['backup']['snapshot_id'])
         self.assertIn('updated_at', res_dict['backup'])
 
+        if snapshot:
+            snapshot.destroy()
         db.backup_destroy(context.get_admin_context(), backup_id)
         db.volume_destroy(context.get_admin_context(), volume_id)
@@ -283,7 +305,7 @@ class BackupsAPITestCase(test.TestCase):
         res_dict = json.loads(res.body)
 
         self.assertEqual(200, res.status_int)
-        self.assertEqual(15, len(res_dict['backups'][0]))
+        self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0]))
         self.assertEqual('az1', res_dict['backups'][0]['availability_zone'])
         self.assertEqual('volumebackups',
                          res_dict['backups'][0]['container'])
@@ -298,7 +320,7 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual('1', res_dict['backups'][0]['volume_id'])
         self.assertIn('updated_at', res_dict['backups'][0])
 
-        self.assertEqual(15, len(res_dict['backups'][1]))
+        self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][1]))
         self.assertEqual('az1', res_dict['backups'][1]['availability_zone'])
         self.assertEqual('volumebackups',
                          res_dict['backups'][1]['container'])
@@ -313,7 +335,7 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual('1', res_dict['backups'][1]['volume_id'])
         self.assertIn('updated_at', res_dict['backups'][1])
 
-        self.assertEqual(15, len(res_dict['backups'][2]))
+        self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][2]))
         self.assertEqual('az1', res_dict['backups'][2]['availability_zone'])
         self.assertEqual('volumebackups', res_dict['backups'][2]['container'])
         self.assertEqual('this is a test backup',
@@ -469,9 +491,9 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(200, res.status_int)
         self.assertEqual(2, len(res_dict['backups']))
 
-        self.assertEqual(15, len(res_dict['backups'][0]))
+        self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0]))
         self.assertEqual(backup_id3, res_dict['backups'][0]['id'])
-        self.assertEqual(15, len(res_dict['backups'][1]))
+        self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][1]))
         self.assertEqual(backup_id2, res_dict['backups'][1]['id'])
 
         db.backup_destroy(context.get_admin_context(), backup_id3)
@@ -492,9 +514,9 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(200, res.status_int)
         self.assertEqual(2, len(res_dict['backups']))
 
-        self.assertEqual(15, len(res_dict['backups'][0]))
+        self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0]))
         self.assertEqual(backup_id2, res_dict['backups'][0]['id'])
-        self.assertEqual(15, len(res_dict['backups'][1]))
+        self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][1]))
         self.assertEqual(backup_id1, res_dict['backups'][1]['id'])
 
         db.backup_destroy(context.get_admin_context(), backup_id3)
@@ -515,7 +537,7 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(200, res.status_int)
         self.assertEqual(1, len(res_dict['backups']))
 
-        self.assertEqual(15, len(res_dict['backups'][0]))
+        self.assertEqual(NUM_ELEMENTS_IN_BACKUP, len(res_dict['backups'][0]))
         self.assertEqual(backup_id2, res_dict['backups'][0]['id'])
 
         db.backup_destroy(context.get_admin_context(), backup_id3)
@@ -683,14 +705,22 @@ class BackupsAPITestCase(test.TestCase):
     @mock.patch('cinder.db.service_get_all_by_topic')
     @mock.patch(
        'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
-    def test_create_backup_delta(self, mock_validate,
+    @ddt.data(False, True)
+    def test_create_backup_delta(self, backup_from_snapshot,
+                                 mock_validate,
                                  _mock_service_get_all_by_topic):
         _mock_service_get_all_by_topic.return_value = [
             {'availability_zone': "fake_az", 'host': 'test_host',
              'disabled': 0, 'updated_at': timeutils.utcnow()}]
 
         volume_id = utils.create_volume(self.context, size=5)['id']
+        snapshot = None
+        snapshot_id = None
+        if backup_from_snapshot:
+            snapshot = utils.create_snapshot(self.context,
+                                             volume_id,
+                                             status='available')
+            snapshot_id = snapshot.id
         backup_id = self._create_backup(volume_id, status="available")
         body = {"backup": {"display_name": "nightly001",
                            "display_description":
@@ -698,6 +728,7 @@ class BackupsAPITestCase(test.TestCase):
                            "volume_id": volume_id,
                            "container": "nightlybackups",
                            "incremental": True,
+                           "snapshot_id": snapshot_id,
                            }
                }
         req = webob.Request.blank('/v2/fake/backups')
@@ -713,6 +744,8 @@ class BackupsAPITestCase(test.TestCase):
         self.assertTrue(mock_validate.called)
 
         db.backup_destroy(context.get_admin_context(), backup_id)
+        if snapshot:
+            snapshot.destroy()
         db.volume_destroy(context.get_admin_context(), volume_id)
 
     @mock.patch('cinder.db.service_get_all_by_topic')
@@ -1932,7 +1965,8 @@ class BackupsAPITestCase(test.TestCase):
         self.assertRaises(exception.NotSupportedOperation,
                           self.backup_api.delete, self.context, backup, True)
 
-    def test_show_incremental_backup(self):
+    @ddt.data(False, True)
+    def test_show_incremental_backup(self, backup_from_snapshot):
         volume_id = utils.create_volume(self.context, size=5)['id']
         parent_backup_id = self._create_backup(volume_id, status="available",
                                                num_dependent_backups=1)
@@ -1940,9 +1974,16 @@
                                         incremental=True,
                                         parent_id=parent_backup_id,
                                         num_dependent_backups=1)
+        snapshot = None
+        snapshot_id = None
+        if backup_from_snapshot:
+            snapshot = utils.create_snapshot(self.context,
+                                             volume_id)
+            snapshot_id = snapshot.id
         child_backup_id = self._create_backup(volume_id, status="available",
                                               incremental=True,
-                                              parent_id=backup_id)
+                                              parent_id=backup_id,
+                                              snapshot_id=snapshot_id)
 
         req = webob.Request.blank('/v2/fake/backups/%s' %
                                   backup_id)
@@ -1954,6 +1995,7 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(200, res.status_int)
         self.assertTrue(res_dict['backup']['is_incremental'])
         self.assertTrue(res_dict['backup']['has_dependent_backups'])
+        self.assertIsNone(res_dict['backup']['snapshot_id'])
 
         req = webob.Request.blank('/v2/fake/backups/%s' %
                                   parent_backup_id)
@@ -1965,6 +2007,7 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(200, res.status_int)
         self.assertFalse(res_dict['backup']['is_incremental'])
         self.assertTrue(res_dict['backup']['has_dependent_backups'])
+        self.assertIsNone(res_dict['backup']['snapshot_id'])
 
         req = webob.Request.blank('/v2/fake/backups/%s' %
                                   child_backup_id)
@@ -1976,7 +2019,11 @@ class BackupsAPITestCase(test.TestCase):
         self.assertEqual(200, res.status_int)
         self.assertTrue(res_dict['backup']['is_incremental'])
         self.assertFalse(res_dict['backup']['has_dependent_backups'])
+        self.assertEqual(snapshot_id, res_dict['backup']['snapshot_id'])
 
         db.backup_destroy(context.get_admin_context(), child_backup_id)
         db.backup_destroy(context.get_admin_context(), backup_id)
+        db.backup_destroy(context.get_admin_context(), parent_backup_id)
+        if snapshot:
+            snapshot.destroy()
         db.volume_destroy(context.get_admin_context(), volume_id)


@@ -33,6 +33,8 @@ fake_backup = {
     'project_id': 'fake_project',
     'temp_volume_id': None,
     'temp_snapshot_id': None,
+    'snapshot_id': None,
+    'data_timestamp': None,
 }
 
 
@@ -85,6 +87,11 @@ class TestBackup(test_objects.BaseObjectsTestCase):
         self.assertEqual('2', backup.temp_volume_id)
         self.assertEqual('3', backup.temp_snapshot_id)
 
+    def test_obj_field_snapshot_id(self):
+        backup = objects.Backup(context=self.context,
+                                snapshot_id='2')
+        self.assertEqual('2', backup.snapshot_id)
+
     def test_import_record(self):
         utils.replace_obj_loader(self, objects.Backup)
         backup = objects.Backup(context=self.context, id=1, parent_id=None,


@@ -21,8 +21,8 @@ from cinder import test
 # NOTE: The hashes in this list should only be changed if they come with a
 # corresponding version bump in the affected objects.
 object_data = {
-    'Backup': '1.1-cd077ec037f5ad1f5409fd660bd59f53',
-    'BackupImport': '1.1-cd077ec037f5ad1f5409fd660bd59f53',
+    'Backup': '1.2-62c3da6df3dccb76796e4da65a45a44f',
+    'BackupImport': '1.2-62c3da6df3dccb76796e4da65a45a44f',
     'BackupList': '1.0-24591dabe26d920ce0756fe64cd5f3aa',
     'CGSnapshot': '1.0-190da2a2aa9457edc771d888f7d225c4',
     'CGSnapshotList': '1.0-e8c3f4078cd0ee23487b34d173eec776',


@@ -1901,7 +1901,8 @@ class DBAPIBackupTestCase(BaseTest):
 
     """Tests for db.api.backup_* methods."""
 
-    _ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
+    _ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
+                     'updated_at', 'data_timestamp']
 
     def setUp(self):
         super(DBAPIBackupTestCase, self).setUp()
@@ -1927,7 +1928,8 @@ class DBAPIBackupTestCase(BaseTest):
             'object_count': 100,
             'temp_volume_id': 'temp_volume_id',
             'temp_snapshot_id': 'temp_snapshot_id',
-            'num_dependent_backups': 0, }
+            'num_dependent_backups': 0,
+            'snapshot_id': 'snapshot_id', }
         if one:
             return base_values


@@ -710,6 +710,13 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertIsInstance(private_data.c.last_used.type,
                               self.TIME_TYPE)
 
+    def _check_061(self, engine, data):
+        backups = db_utils.get_table(engine, 'backups')
+        self.assertIsInstance(backups.c.snapshot_id.type,
+                              sqlalchemy.types.VARCHAR)
+        self.assertIsInstance(backups.c.data_timestamp.type,
+                              self.TIME_TYPE)
+
     def test_walk_versions(self):
         self.walk_versions(False, False)


@@ -385,6 +385,7 @@ class NotifyUsageTestCase(test.TestCase):
             'fail_reason': None,
             'parent_id': 'fake_parent_id',
             'num_dependent_backups': 0,
+            'snapshot_id': None,
         }
 
         # Make it easier to find out differences between raw and expected.


@@ -171,7 +171,9 @@ def create_backup(ctxt,
                   status='creating',
                   parent_id=None,
                   temp_volume_id=None,
-                  temp_snapshot_id=None):
+                  temp_snapshot_id=None,
+                  snapshot_id=None,
+                  data_timestamp=None):
     backup = {}
     backup['volume_id'] = volume_id
     backup['user_id'] = ctxt.user_id
@@ -189,6 +191,8 @@ def create_backup(ctxt,
     backup['object_count'] = 22
     backup['temp_volume_id'] = temp_volume_id
     backup['temp_snapshot_id'] = temp_snapshot_id
+    backup['snapshot_id'] = snapshot_id
+    backup['data_timestamp'] = data_timestamp
     return db.backup_create(ctxt, backup)


@@ -1073,6 +1073,15 @@ class BaseVD(object):
 
     def backup_volume(self, context, backup, backup_service):
         """Create a new backup from an existing volume."""
+        # NOTE(xyang): _backup_volume_temp_snapshot and
+        # _backup_volume_temp_volume are splitted into two
+        # functions because there were concerns during code
+        # reviews that it is confusing to put all the logic
+        # into one function. There's a trade-off between
+        # reducing code duplication and increasing code
+        # readability here. Added a note here to explain why
+        # we've decided to have two separate functions as
+        # there will always be arguments from both sides.
         if self.backup_use_temp_snapshot():
             self._backup_volume_temp_snapshot(context, backup,
                                               backup_service)
@@ -1081,28 +1090,47 @@ class BaseVD(object):
                                            backup_service)
 
     def _backup_volume_temp_volume(self, context, backup, backup_service):
-        """Create a new backup from an existing volume.
+        """Create a new backup from an existing volume or snapshot.
 
-        For in-use volume, create a temp volume and back it up.
+        To backup a snapshot, create a temp volume from the snapshot and
+        back it up.
+
+        Otherwise to backup an in-use volume, create a temp volume and
+        back it up.
         """
         volume = self.db.volume_get(context, backup.volume_id)
+        snapshot = None
+        if backup.snapshot_id:
+            snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id)
 
         LOG.debug('Creating a new backup for volume %s.', volume['name'])
 
-        # NOTE(xyang): Check volume status; if 'in-use', create a temp
-        # volume from the source volume, backup the temp volume, and
-        # then clean up the temp volume; if 'available', just backup the
-        # volume.
-        previous_status = volume.get('previous_status', None)
-        device_to_backup = volume
         temp_vol_ref = None
-        if previous_status == "in-use":
-            temp_vol_ref = self._create_temp_cloned_volume(
-                context, volume)
+        device_to_backup = volume
+
+        # NOTE(xyang): If it is to backup from snapshot, create a temp
+        # volume from the source snapshot, backup the temp volume, and
+        # then clean up the temp volume.
+        if snapshot:
+            temp_vol_ref = self._create_temp_volume_from_snapshot(
+                context, volume, snapshot)
             backup.temp_volume_id = temp_vol_ref['id']
             backup.save()
             device_to_backup = temp_vol_ref
+        else:
+            # NOTE(xyang): Check volume status if it is not to backup from
+            # snapshot; if 'in-use', create a temp volume from the source
+            # volume, backup the temp volume, and then clean up the temp
+            # volume; if 'available', just backup the volume.
+            previous_status = volume.get('previous_status')
+            if previous_status == "in-use":
+                temp_vol_ref = self._create_temp_cloned_volume(
+                    context, volume)
+                backup.temp_volume_id = temp_vol_ref['id']
+                backup.save()
+                device_to_backup = temp_vol_ref
 
         self._backup_device(context, backup, backup_service, device_to_backup)
 
         if temp_vol_ref:
@@ -1111,29 +1139,43 @@ class BaseVD(object):
             backup.save()
 
     def _backup_volume_temp_snapshot(self, context, backup, backup_service):
-        """Create a new backup from an existing volume.
+        """Create a new backup from an existing volume or snapshot.
 
-        For in-use volume, create a temp snapshot and back it up.
+        If it is to backup from snapshot, back it up directly.
+
+        Otherwise for in-use volume, create a temp snapshot and back it up.
         """
         volume = self.db.volume_get(context, backup.volume_id)
+        snapshot = None
+        if backup.snapshot_id:
+            snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id)
 
         LOG.debug('Creating a new backup for volume %s.', volume['name'])
 
-        # NOTE(xyang): Check volume status; if 'in-use', create a temp
-        # snapshot from the source volume, backup the temp snapshot, and
-        # then clean up the temp snapshot; if 'available', just backup the
-        # volume.
-        previous_status = volume.get('previous_status', None)
         device_to_backup = volume
         is_snapshot = False
         temp_snapshot = None
-        if previous_status == "in-use":
-            temp_snapshot = self._create_temp_snapshot(context, volume)
-            backup.temp_snapshot_id = temp_snapshot.id
-            backup.save()
-            device_to_backup = temp_snapshot
+
+        # NOTE(xyang): If it is to backup from snapshot, back it up
+        # directly. No need to clean it up.
+        if snapshot:
+            device_to_backup = snapshot
             is_snapshot = True
+        else:
+            # NOTE(xyang): If it is not to backup from snapshot, check volume
+            # status. If the volume status is 'in-use', create a temp snapshot
+            # from the source volume, backup the temp snapshot, and then clean
+            # up the temp snapshot; if the volume status is 'available', just
+            # backup the volume.
+            previous_status = volume.get('previous_status')
+            if previous_status == "in-use":
+                temp_snapshot = self._create_temp_snapshot(context, volume)
+                backup.temp_snapshot_id = temp_snapshot.id
+                backup.save()
+                device_to_backup = temp_snapshot
+                is_snapshot = True
 
         self._backup_device(context, backup, backup_service, device_to_backup,
                             is_snapshot)
@@ -1255,6 +1297,27 @@ class BaseVD(object):
                                  {'status': 'available'})
         return temp_vol_ref
 
+    def _create_temp_volume_from_snapshot(self, context, volume, snapshot):
+        temp_volume = {
+            'size': volume['size'],
+            'display_name': 'backup-vol-%s' % volume['id'],
+            'host': volume['host'],
+            'user_id': context.user_id,
+            'project_id': context.project_id,
+            'status': 'creating',
+        }
+        temp_vol_ref = self.db.volume_create(context, temp_volume)
+        try:
+            self.create_volume_from_snapshot(temp_vol_ref, snapshot)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                self.db.volume_destroy(context.elevated(),
+                                       temp_vol_ref['id'])
+
+        self.db.volume_update(context, temp_vol_ref['id'],
+                              {'status': 'available'})
+        return temp_vol_ref
+
     def _delete_temp_snapshot(self, context, snapshot):
         self.delete_snapshot(snapshot)
         with snapshot.obj_as_admin():
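
The two driver paths above boil down to a small decision table; the sketch below (illustrative only, returning descriptive strings rather than real volume or snapshot refs) summarizes which device ends up being backed up:

# --- illustrative example, not part of the patch ---
def describe_backup_device(snapshot, previous_status, use_temp_snapshot):
    # Summarizes the branching in _backup_volume_temp_snapshot and
    # _backup_volume_temp_volume above.
    if snapshot:
        if use_temp_snapshot:
            return "the snapshot itself (backed up directly)"
        return "a temp volume created from the snapshot"
    if previous_status == "in-use":
        if use_temp_snapshot:
            return "a temp snapshot of the volume"
        return "a temp volume cloned from the volume"
    return "the volume itself"
# --- end of illustrative example ---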


@@ -31,6 +31,7 @@ from cinder.brick.local_dev import lvm as lvm
 from cinder import exception
 from cinder.i18n import _, _LE, _LI, _LW
 from cinder.image import image_utils
+from cinder import objects
 from cinder import utils
 from cinder.volume import driver
 from cinder.volume import utils as volutils
@@ -505,15 +506,28 @@ class LVMVolumeDriver(driver.VolumeDriver):
     def backup_volume(self, context, backup, backup_service):
         """Create a new backup from an existing volume."""
         volume = self.db.volume_get(context, backup.volume_id)
+        snapshot = None
+        if backup.snapshot_id:
+            snapshot = objects.Snapshot.get_by_id(context, backup.snapshot_id)
         temp_snapshot = None
-        previous_status = volume['previous_status']
-        if previous_status == 'in-use':
-            temp_snapshot = self._create_temp_snapshot(context, volume)
-            backup.temp_snapshot_id = temp_snapshot.id
-            backup.save()
-            volume_path = self.local_path(temp_snapshot)
+        # NOTE(xyang): If it is to backup from snapshot, back it up
+        # directly. No need to clean it up.
+        if snapshot:
+            volume_path = self.local_path(snapshot)
         else:
-            volume_path = self.local_path(volume)
+            # NOTE(xyang): If it is not to backup from snapshot, check volume
+            # status. If the volume status is 'in-use', create a temp snapshot
+            # from the source volume, backup the temp snapshot, and then clean
+            # up the temp snapshot; if the volume status is 'available', just
+            # backup the volume.
+            previous_status = volume.get('previous_status', None)
+            if previous_status == "in-use":
+                temp_snapshot = self._create_temp_snapshot(context, volume)
+                backup.temp_snapshot_id = temp_snapshot.id
+                backup.save()
+                volume_path = self.local_path(temp_snapshot)
+            else:
+                volume_path = self.local_path(volume)
 
         try:
             with utils.temporary_chown(volume_path):


@@ -108,6 +108,7 @@ def _usage_from_backup(backup_ref, **kw):
         fail_reason=backup_ref['fail_reason'],
         parent_id=backup_ref['parent_id'],
         num_dependent_backups=num_dependent_backups,
+        snapshot_id=backup_ref['snapshot_id'],
     )
 
     usage_info.update(kw)


@@ -0,0 +1,3 @@
+---
+features:
+  - Backup snapshots.