Merge "Support metadata for backup resource"

This commit is contained in:
Jenkins 2017-07-27 02:13:15 +00:00 committed by Gerrit Code Review
commit 48630526e2
22 changed files with 461 additions and 46 deletions

View File

@ -134,6 +134,7 @@ class BackupsController(wsgi.Controller):
context = req.environ['cinder.context']
backup = body['backup']
req_version = req.api_version_request
try:
volume_id = backup['volume_id']
@ -150,6 +151,8 @@ class BackupsController(wsgi.Controller):
incremental = backup.get('incremental', False)
force = backup.get('force', False)
snapshot_id = backup.get('snapshot_id', None)
metadata = backup.get(
'metadata', None) if req_version.matches("3.43") else None
LOG.info("Creating backup of volume %(volume_id)s in container"
" %(container)s",
{'volume_id': volume_id, 'container': container},
@ -159,9 +162,11 @@ class BackupsController(wsgi.Controller):
new_backup = self.backup_api.create(context, name, description,
volume_id, container,
incremental, None, force,
snapshot_id)
snapshot_id, metadata)
except (exception.InvalidVolume,
exception.InvalidSnapshot) as error:
exception.InvalidSnapshot,
exception.InvalidVolumeMetadata,
exception.InvalidVolumeMetadataSize) as error:
raise exc.HTTPBadRequest(explanation=error.msg)
# Other not found exceptions will be handled at the wsgi level
except exception.ServiceNotFound as error:

View File

@ -105,6 +105,7 @@ REST_API_VERSION_HISTORY = """
Administrator can disable this ability by updating the
'volume:extend_attached_volume' policy rule. Extend in reserved
state is intentionally NOT allowed.
* 3.43 - Support backup CRUD with metadata.
"""
# The minimum and maximum versions of the API supported
@ -112,7 +113,7 @@ REST_API_VERSION_HISTORY = """
# minimum version of the API supported.
# Explicitly using /v1 or /v2 endpoints will still work
_MIN_API_VERSION = "3.0"
_MAX_API_VERSION = "3.42"
_MAX_API_VERSION = "3.43"
_LEGACY_API_VERSION1 = "1.0"
_LEGACY_API_VERSION2 = "2.0"

View File

@ -361,3 +361,7 @@ user documentation.
Administrator can disable this ability by updating the
``volume:extend_attached_volume`` policy rule. Extend of a reserved
Volume is NOT allowed.
3.43
----
Support backup CRUD with metadata.

View File

@ -39,6 +39,7 @@ class BackupsController(backups_v2.BackupsController):
"""Update a backup."""
context = req.environ['cinder.context']
self.assert_valid_body(body, 'backup')
req_version = req.api_version_request
backup_update = body['backup']
@ -49,6 +50,8 @@ class BackupsController(backups_v2.BackupsController):
if 'description' in backup_update:
update_dict['display_description'] = (
backup_update.pop('description'))
if req_version.matches('3.43') and 'metadata' in backup_update:
update_dict['metadata'] = backup_update.pop('metadata')
# Check no unsupported fields.
if backup_update:
msg = _("Unsupported fields %s.") % (", ".join(backup_update))

View File

@ -56,7 +56,7 @@ class ViewBuilder(common.ViewBuilder):
def detail(self, request, backup):
"""Detailed view of a single backup."""
return {
backup_dict = {
'backup': {
'id': backup.get('id'),
'status': backup.get('status'),
@ -77,6 +77,10 @@ class ViewBuilder(common.ViewBuilder):
'data_timestamp': backup.data_timestamp,
}
}
req_version = request.api_version_request
if req_version.matches("3.43"):
backup_dict['backup']['metadata'] = backup.metadata
return backup_dict
def _list_view(self, func, request, backups, backup_count):
"""Provide a view for a list of backups."""

View File

@ -39,6 +39,7 @@ from cinder.objects import fields
import cinder.policy
from cinder import quota
from cinder import quota_utils
from cinder import utils
import cinder.volume
from cinder.volume import utils as volume_utils
@ -201,9 +202,10 @@ class API(base.Base):
def create(self, context, name, description, volume_id,
container, incremental=False, availability_zone=None,
force=False, snapshot_id=None):
force=False, snapshot_id=None, metadata=None):
"""Make the RPC call to create a volume backup."""
check_policy(context, 'create')
utils.check_metadata_properties(metadata)
volume = self.volume_api.get(context, volume_id)
snapshot = None
if snapshot_id:
@ -314,6 +316,7 @@ class API(base.Base):
'host': host,
'snapshot_id': snapshot_id,
'data_timestamp': data_timestamp,
'metadata': metadata or {}
}
backup = objects.Backup(context=context, **kwargs)
backup.create()
@ -495,6 +498,7 @@ class API(base.Base):
'project_id': context.project_id,
'volume_id': IMPORT_VOLUME_ID,
'status': fields.BackupStatus.CREATING,
'metadata': {}
}
try:

View File

@ -1183,6 +1183,14 @@ def backup_create(context, values):
return IMPL.backup_create(context, values)
def backup_metadata_get(context, backup_id):
    """Return all non-deleted metadata for a backup as a {key: value} dict."""
    return IMPL.backup_metadata_get(context, backup_id)
def backup_metadata_update(context, backup_id, metadata, delete):
    """Update backup metadata; when delete is True, remove keys not given."""
    return IMPL.backup_metadata_update(context, backup_id, metadata, delete)
def backup_get_all_by_project(context, project_id, filters=None, marker=None,
limit=None, offset=None, sort_keys=None,
sort_dirs=None):

View File

@ -226,6 +226,21 @@ def require_snapshot_exists(f):
return wrapper
def require_backup_exists(f):
    """Decorator to require the specified backup to exist.

    Requires the wrapped function to use context and backup_id as
    their first two arguments.
    """
    @functools.wraps(f)
    def wrapper(context, backup_id, *args, **kwargs):
        # Fail fast with BackupNotFound before running the wrapped call.
        if not resource_exists(context, models.Backup, backup_id):
            raise exception.BackupNotFound(backup_id=backup_id)
        return f(context, backup_id, *args, **kwargs)
    return wrapper
def _retry_on_deadlock(f):
"""Decorator to retry a DB API call if Deadlock was received."""
@functools.wraps(f)
@ -4954,11 +4969,10 @@ def backup_get(context, backup_id, read_deleted=None, project_only=True):
def _backup_get(context, backup_id, session=None, read_deleted=None,
project_only=True):
result = model_query(context, models.Backup, session=session,
project_only=project_only,
read_deleted=read_deleted).\
filter_by(id=backup_id).\
first()
result = model_query(
context, models.Backup, session=session, project_only=project_only,
read_deleted=read_deleted).options(
joinedload('backup_metadata')).filter_by(id=backup_id).first()
if not result:
raise exception.BackupNotFound(backup_id=backup_id)
@ -4983,8 +4997,9 @@ def _backup_get_all(context, filters=None, marker=None, limit=None,
def _backups_get_query(context, session=None, project_only=False):
return model_query(context, models.Backup, session=session,
project_only=project_only)
return model_query(
context, models.Backup, session=session,
project_only=project_only).options(joinedload('backup_metadata'))
@apply_like_filters(model=models.Backup)
def _process_backups_filters(query, filters):
    """Apply user-supplied filters to a backups query.

    ``metadata`` is special-cased: its value is a dict and every key/value
    pair must match a row reachable through the Backup model's
    ``backup_metadata`` relationship.  All other filters are applied as
    exact column matches.

    :returns: the filtered query, or None when the filters reference
              columns that do not exist on the Backup model.
    """
    if filters:
        # Ensure that filters' keys exist on the model
        if not is_valid_model_filters(models.Backup, filters):
            return
        filters_dict = {}
        for key, value in filters.items():
            if key == 'metadata':
                # Bug fix: filter on the Backup model's own metadata
                # relationship.  The original referenced
                # models.Snapshot.snapshot_metadata, so metadata filters
                # could never match any backup.
                col_attr = getattr(models.Backup, 'backup_metadata')
                for k, v in value.items():
                    query = query.filter(col_attr.any(key=k, value=v))
            else:
                filters_dict[key] = value

        # Apply exact matches
        if filters_dict:
            query = query.filter_by(**filters_dict)
    return query
@ -5006,7 +5032,9 @@ def backup_get_all(context, filters=None, marker=None, limit=None,
@require_admin_context
def backup_get_all_by_host(context, host):
return model_query(context, models.Backup).filter_by(host=host).all()
return model_query(
context, models.Backup).options(
joinedload('backup_metadata')).filter_by(host=host).all()
@require_context
@ -5044,7 +5072,8 @@ def backup_get_all_by_volume(context, volume_id, filters=None):
def backup_get_all_active_by_window(context, begin, end=None, project_id=None):
"""Return backups that were active during window."""
query = model_query(context, models.Backup, read_deleted="yes")
query = model_query(context, models.Backup, read_deleted="yes").options(
joinedload('backup_metadata'))
query = query.filter(or_(models.Backup.deleted_at == None, # noqa
models.Backup.deleted_at > begin))
if end:
@ -5058,15 +5087,18 @@ def backup_get_all_active_by_window(context, begin, end=None, project_id=None):
@handle_db_data_error
@require_context
def backup_create(context, values):
backup = models.Backup()
values['backup_metadata'] = _metadata_refs(values.get('metadata'),
models.BackupMetadata)
if not values.get('id'):
values['id'] = str(uuid.uuid4())
backup.update(values)
session = get_session()
with session.begin():
backup.save(session)
return backup
backup_ref = models.Backup()
backup_ref.update(values)
session.add(backup_ref)
return _backup_get(context, values['id'], session=session)
@handle_db_data_error
@ -5083,17 +5115,99 @@ def backup_update(context, backup_id, values):
@require_admin_context
def backup_destroy(context, backup_id):
utcnow = timeutils.utcnow()
updated_values = {'status': fields.BackupStatus.DELETED,
'deleted': True,
'deleted_at': timeutils.utcnow(),
'deleted_at': utcnow,
'updated_at': literal_column('updated_at')}
model_query(context, models.Backup).\
session = get_session()
with session.begin():
model_query(context, models.Backup, session=session).\
filter_by(id=backup_id).\
update(updated_values)
model_query(context, models.BackupMetadata, session=session).\
filter_by(backup_id=backup_id).\
update({'deleted': True,
'deleted_at': utcnow,
'updated_at': literal_column('updated_at')})
del updated_values['updated_at']
return updated_values
@require_context
@require_backup_exists
def backup_metadata_get(context, backup_id):
    """Return the backup's non-deleted metadata as a {key: value} dict.

    Raises BackupNotFound (via the decorator) if the backup does not exist.
    """
    return _backup_metadata_get(context, backup_id)
@require_context
def _backup_metadata_get(context, backup_id, session=None):
    """Collect the backup's live metadata rows into a plain dict."""
    query = _backup_metadata_get_query(context, backup_id, session)
    return {row['key']: row['value'] for row in query.all()}
def _backup_metadata_get_query(context, backup_id, session=None):
    """Build a query for the backup's metadata rows, excluding deleted ones."""
    return model_query(
        context, models.BackupMetadata,
        session=session, read_deleted="no").filter_by(backup_id=backup_id)
@require_context
def _backup_metadata_get_item(context, backup_id, key, session=None):
    """Return the single metadata row for (backup_id, key).

    :raises: BackupMetadataNotFound when the key is absent.
    """
    result = _backup_metadata_get_query(
        context, backup_id, session=session).filter_by(key=key).first()
    if not result:
        raise exception.BackupMetadataNotFound(metadata_key=key,
                                               backup_id=backup_id)
    return result
@require_context
@require_backup_exists
@handle_db_data_error
@_retry_on_deadlock
def backup_metadata_update(context, backup_id, metadata, delete):
    """Create or update a backup's metadata inside one transaction.

    :param metadata: dict of key/value pairs to set.
    :param delete: when True, existing keys not present in ``metadata``
                   are soft-deleted first.
    :returns: the resulting metadata dict for the backup.
    """
    session = get_session()
    with session.begin():
        # Set existing metadata to deleted if delete argument is True
        if delete:
            original_metadata = _backup_metadata_get(context, backup_id,
                                                     session)
            for meta_key, meta_value in original_metadata.items():
                if meta_key not in metadata:
                    meta_ref = _backup_metadata_get_item(context,
                                                         backup_id,
                                                         meta_key, session)
                    meta_ref.update({'deleted': True,
                                     'deleted_at': timeutils.utcnow()})
                    meta_ref.save(session=session)

        meta_ref = None

        # Now update all existing items with new values, or create new meta
        # objects
        for meta_key, meta_value in metadata.items():

            # update the value whether it exists or not
            item = {"value": meta_value}

            try:
                meta_ref = _backup_metadata_get_item(context, backup_id,
                                                     meta_key, session)
            except exception.BackupMetadataNotFound:
                # Key does not exist yet; create a fresh row for it.
                meta_ref = models.BackupMetadata()
                item.update({"key": meta_key, "backup_id": backup_id})

            meta_ref.update(item)
            meta_ref.save(session=session)

    return backup_metadata_get(context, backup_id)
###############################
@ -5868,6 +5982,11 @@ def is_valid_model_filters(model, filters, exclude_list=None):
for key in filters.keys():
if exclude_list and key in exclude_list:
continue
if key == 'metadata':
if not isinstance(filters[key], dict):
LOG.debug("Metadata filter value is not valid dictionary")
return False
continue
try:
key = key.rstrip('~')
getattr(model, key)

View File

@ -0,0 +1,50 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db.sqlalchemy import utils
from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
from sqlalchemy import MetaData, String, Table
def upgrade(migrate_engine):
    """Add backup_metadata table."""
    meta = MetaData()
    meta.bind = migrate_engine

    # Load the parent table so the ForeignKey below can resolve backups.id.
    Table('backups', meta, autoload=True)

    backup_metadata = Table(
        'backup_metadata', meta,
        # Standard soft-delete bookkeeping columns shared by cinder models.
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),
        Column('deleted_at', DateTime(timezone=False)),
        Column('deleted', Boolean(), default=False),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('backup_id', String(36),
               ForeignKey('backups.id'),
               nullable=False),
        Column('key', String(255)),
        Column('value', String(255)),
        mysql_engine='InnoDB',
        mysql_charset='utf8'
    )

    backup_metadata.create()

    # Index the FK column for per-backup lookups; guard against engines
    # (e.g. MySQL/InnoDB) that create an index for the FK implicitly.
    if not utils.index_exists_on_columns(migrate_engine,
                                         'backup_metadata',
                                         ['backup_id']):
        utils.add_index(migrate_engine,
                        'backup_metadata',
                        'backup_metadata_backup_id_idx',
                        ['backup_id'])

View File

@ -756,6 +756,20 @@ class Backup(BASE, CinderBase):
return fail_reason and fail_reason[:255] or ''
class BackupMetadata(BASE, CinderBase):
    """Represents a metadata key/value pair for a backup."""
    __tablename__ = 'backup_metadata'
    # Surrogate primary key for the metadata row itself.
    id = Column(Integer, primary_key=True)
    key = Column(String(255))
    value = Column(String(255))
    # Owning backup; one backup may have many metadata rows.
    backup_id = Column(String(36), ForeignKey('backups.id'), nullable=False)
    # The join condition excludes soft-deleted rows, so the
    # Backup.backup_metadata backref only exposes live metadata.
    backup = relationship(Backup, backref="backup_metadata",
                          foreign_keys=backup_id,
                          primaryjoin='and_('
                          'BackupMetadata.backup_id == Backup.id,'
                          'BackupMetadata.deleted == False)')
class Encryption(BASE, CinderBase):
"""Represents encryption requirement for a volume type.

View File

@ -753,6 +753,11 @@ class BackupMetadataUnsupportedVersion(BackupDriverException):
message = _("Unsupported backup metadata version requested")
class BackupMetadataNotFound(NotFound):
    # Raised by the DB layer when a metadata key lookup for a backup misses.
    message = _("Backup %(backup_id)s has no metadata with "
                "key %(metadata_key)s.")
class BackupVerifyUnsupportedDriver(BackupDriverException):
message = _("Unsupported backup verify driver")

View File

@ -38,7 +38,10 @@ class Backup(base.CinderPersistentObject, base.CinderObject,
# Version 1.2: Add new field snapshot_id and data_timestamp.
# Version 1.3: Changed 'status' field to use BackupStatusField
# Version 1.4: Add restore_volume_id
VERSION = '1.4'
# Version 1.5: Add metadata
VERSION = '1.5'
OPTIONAL_FIELDS = ('metadata',)
fields = {
'id': fields.UUIDField(),
@ -71,10 +74,26 @@ class Backup(base.CinderPersistentObject, base.CinderObject,
'snapshot_id': fields.StringField(nullable=True),
'data_timestamp': fields.DateTimeField(nullable=True),
'restore_volume_id': fields.StringField(nullable=True),
'metadata': fields.DictOfStringsField(nullable=True),
}
obj_extra_fields = ['name', 'is_incremental', 'has_dependent_backups']
    def __init__(self, *args, **kwargs):
        super(Backup, self).__init__(*args, **kwargs)
        # Baseline snapshot of metadata used by obj_what_changed();
        # refreshed whenever changes are reset.
        self._orig_metadata = {}

        self._reset_metadata_tracking()
def _reset_metadata_tracking(self, fields=None):
if fields is None or 'metadata' in fields:
self._orig_metadata = (dict(self.metadata)
if self.obj_attr_is_set('metadata') else {})
    @classmethod
    def _get_expected_attrs(cls, context, *args, **kwargs):
        # Metadata is the only optional field; always eager-load it.
        return 'metadata',
    @property
    def name(self):
        # Display name derived from the configurable template and the id.
        return CONF.backup_name_template % self.id
@ -92,18 +111,50 @@ class Backup(base.CinderPersistentObject, base.CinderObject,
super(Backup, self).obj_make_compatible(primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
@staticmethod
def _from_db_object(context, backup, db_backup):
@classmethod
def _from_db_object(cls, context, backup, db_backup, expected_attrs=None):
if expected_attrs is None:
expected_attrs = []
for name, field in backup.fields.items():
if name in cls.OPTIONAL_FIELDS:
continue
value = db_backup.get(name)
if isinstance(field, fields.IntegerField):
value = value if value is not None else 0
backup[name] = value
if 'metadata' in expected_attrs:
metadata = db_backup.get('backup_metadata')
if metadata is None:
raise exception.MetadataAbsent()
backup.metadata = {item['key']: item['value']
for item in metadata}
backup._context = context
backup.obj_reset_changes()
return backup
    def obj_reset_changes(self, fields=None):
        super(Backup, self).obj_reset_changes(fields)
        # Keep the metadata change-tracking baseline in sync with the reset.
        self._reset_metadata_tracking(fields=fields)
def obj_load_attr(self, attrname):
if attrname not in self.OPTIONAL_FIELDS:
raise exception.ObjectActionError(
action='obj_load_attr',
reason=_('attribute %s not lazy-loadable') % attrname)
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
self.obj_reset_changes(fields=[attrname])
    def obj_what_changed(self):
        changes = super(Backup, self).obj_what_changed()
        # Metadata is a mutable dict, so in-place edits don't trigger the
        # base class's setattr tracking; compare against the saved baseline.
        if hasattr(self, 'metadata') and self.metadata != self._orig_metadata:
            changes.add('metadata')

        return changes
def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
@ -116,6 +167,11 @@ class Backup(base.CinderPersistentObject, base.CinderObject,
    def save(self):
        """Persist pending changes; metadata goes through its own DB call."""
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'metadata' in updates:
                # Metadata lives in its own table, so it is updated via
                # backup_metadata_update (delete=True replaces the full set)
                # and removed from the column-level update dict.
                metadata = updates.pop('metadata', None)
                self.metadata = db.backup_metadata_update(self._context,
                                                          self.id, metadata,
                                                          True)
            db.backup_update(self._context, self.id, updates)

        self.obj_reset_changes()
@ -166,14 +222,16 @@ class BackupList(base.ObjectListBase, base.CinderObject):
offset=None, sort_keys=None, sort_dirs=None):
backups = db.backup_get_all(context, filters, marker, limit, offset,
sort_keys, sort_dirs)
expected_attrs = Backup._get_expected_attrs(context)
return base.obj_make_list(context, cls(context), objects.Backup,
backups)
backups, expected_attrs=expected_attrs)
@classmethod
def get_all_by_host(cls, context, host):
backups = db.backup_get_all_by_host(context, host)
expected_attrs = Backup._get_expected_attrs(context)
return base.obj_make_list(context, cls(context), objects.Backup,
backups)
backups, expected_attrs=expected_attrs)
@classmethod
def get_all_by_project(cls, context, project_id, filters=None,
@ -182,20 +240,23 @@ class BackupList(base.ObjectListBase, base.CinderObject):
backups = db.backup_get_all_by_project(context, project_id, filters,
marker, limit, offset,
sort_keys, sort_dirs)
expected_attrs = Backup._get_expected_attrs(context)
return base.obj_make_list(context, cls(context), objects.Backup,
backups)
backups, expected_attrs=expected_attrs)
@classmethod
def get_all_by_volume(cls, context, volume_id, filters=None):
backups = db.backup_get_all_by_volume(context, volume_id, filters)
expected_attrs = Backup._get_expected_attrs(context)
return base.obj_make_list(context, cls(context), objects.Backup,
backups)
backups, expected_attrs=expected_attrs)
@classmethod
def get_all_active_by_window(cls, context, begin, end):
backups = db.backup_get_all_active_by_window(context, begin, end)
expected_attrs = Backup._get_expected_attrs(context)
return base.obj_make_list(context, cls(context), objects.Backup,
backups)
backups, expected_attrs=expected_attrs)
@base.CinderObjectRegistry.register

View File

@ -134,6 +134,7 @@ OBJ_VERSIONS.add('1.23', {'VolumeAttachment': '1.2'})
OBJ_VERSIONS.add('1.24', {'LogLevel': '1.0', 'LogLevelList': '1.0'})
OBJ_VERSIONS.add('1.25', {'Group': '1.2'})
OBJ_VERSIONS.add('1.26', {'Snapshot': '1.5'})
OBJ_VERSIONS.add('1.27', {'Backup': '1.5', 'BackupImport': '1.5'})
class CinderObjectRegistry(base.VersionedObjectRegistry):

View File

@ -260,10 +260,7 @@ class AdminActionsTest(BaseAdminTest):
def test_backup_reset_status_as_non_admin(self):
ctx = context.RequestContext(fake.USER_ID, fake.PROJECT_ID)
backup = db.backup_create(ctx, {'status': 'available',
'size': 1,
'volume_id': "fakeid",
'host': 'test'})
backup = test_utils.create_backup(ctx, status='available')
resp = self._issue_backup_reset(ctx,
backup,
{'status': fields.BackupStatus.ERROR})

View File

@ -25,6 +25,7 @@ from six.moves import http_client
import webob
from cinder.api.contrib import backups
from cinder.api.openstack import api_version_request as api_version
# needed for stubs to work
import cinder.backup
from cinder.backup import api as backup_api
@ -109,6 +110,23 @@ class BackupsAPITestCase(test.TestCase):
backup.destroy()
volume.destroy()
    def test_show_backup_return_metadata(self):
        """GET on a single backup at microversion >=3.43 includes metadata."""
        volume = utils.create_volume(self.context, size=5, status='creating')
        backup = utils.create_backup(self.context, volume.id,
                                     metadata={"test_key": "test_value"})
        req = webob.Request.blank('/v3/%s/backups/%s' % (
            fake.PROJECT_ID, backup.id))
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/json'
        # Opt in to 3.43, the microversion that added metadata to the view.
        req.headers['OpenStack-API-Version'] = 'volume 3.43'
        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_context))

        res_dict = jsonutils.loads(res.body)
        self.assertEqual({"test_key": "test_value"},
                         res_dict['backup']['metadata'])
        volume.destroy()
        backup.destroy()
def test_show_backup_with_backup_NotFound(self):
req = webob.Request.blank('/v2/%s/backups/%s' % (
fake.PROJECT_ID, fake.WILL_NOT_BE_FOUND_ID))
@ -303,6 +321,33 @@ class BackupsAPITestCase(test.TestCase):
backup2.destroy()
backup1.destroy()
    def test_list_backups_detail_return_metadata(self):
        """Detail listing at >=3.43 returns each backup's own metadata."""
        backup1 = utils.create_backup(self.context, size=1,
                                      metadata={'key1': 'value1'})
        backup2 = utils.create_backup(self.context, size=1,
                                      metadata={'key2': 'value2'})
        backup3 = utils.create_backup(self.context, size=1)

        req = webob.Request.blank('/v3/%s/backups/detail' % fake.PROJECT_ID)
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/json'
        req.headers['Accept'] = 'application/json'
        req.headers['OpenStack-API-Version'] = 'volume 3.43'
        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_context))
        res_dict = jsonutils.loads(res.body)

        # Listing is newest-first, so backup1 is last and backup3 first;
        # a backup created without metadata reports an empty dict.
        self.assertEqual({'key1': 'value1'},
                         res_dict['backups'][2]['metadata'])
        self.assertEqual({'key2': 'value2'},
                         res_dict['backups'][1]['metadata'])
        self.assertEqual({},
                         res_dict['backups'][0]['metadata'])

        backup3.destroy()
        backup2.destroy()
        backup1.destroy()
def test_list_backups_detail_using_filters(self):
backup1 = utils.create_backup(self.context, display_name='test2')
backup2 = utils.create_backup(self.context,
@ -470,6 +515,48 @@ class BackupsAPITestCase(test.TestCase):
volume.destroy()
    @mock.patch('cinder.db.service_get_all')
    @mock.patch(
        'cinder.api.openstack.wsgi.Controller.validate_name_and_description')
    def test_create_backup_with_metadata(self, mock_validate,
                                         _mock_service_get_all):
        """POST /backups at >=3.43 accepts and round-trips metadata."""
        # Pretend an enabled backup service exists so create can schedule.
        _mock_service_get_all.return_value = [
            {'availability_zone': 'fake_az', 'host': 'testhost',
             'disabled': 0, 'updated_at': timeutils.utcnow()}]

        volume = utils.create_volume(self.context, size=1)
        # Create a backup with metadata
        body = {"backup": {"display_name": "nightly001",
                           "display_description":
                           "Nightly Backup 03-Sep-2012",
                           "volume_id": volume.id,
                           "container": "nightlybackups",
                           'metadata': {'test_key': 'test_value'}
                           }
                }
        req = webob.Request.blank('/v3/%s/backups' % fake.PROJECT_ID)
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.headers['OpenStack-API-Version'] = 'volume 3.43'
        req.body = jsonutils.dump_as_bytes(body)
        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_context))
        res_dict = jsonutils.loads(res.body)
        # Get the new backup
        req = webob.Request.blank('/v3/%s/backups/%s' % (
            fake.PROJECT_ID, res_dict['backup']['id']))
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/json'
        req.headers['OpenStack-API-Version'] = 'volume 3.43'
        res = req.get_response(fakes.wsgi_app(
            fake_auth_context=self.user_context))
        res_dict = jsonutils.loads(res.body)

        # The metadata supplied at create time must be returned on show.
        self.assertEqual({'test_key': 'test_value'},
                         res_dict['backup']['metadata'])

        volume.destroy()
@mock.patch('cinder.db.service_get_all')
def test_create_backup_inuse_no_force(self,
_mock_service_get_all):
@ -666,6 +753,7 @@ class BackupsAPITestCase(test.TestCase):
req = webob.Request.blank('/v2/%s/backups' % fake.PROJECT_ID)
req.method = 'POST'
req.environ['cinder.context'] = self.context
req.api_version_request = api_version.APIVersionRequest()
self.assertRaises(exception.InvalidInput,
self.controller.create,
req,

View File

@ -81,7 +81,8 @@ class BaseBackupTest(test.TestCase):
service=None,
temp_volume_id=None,
temp_snapshot_id=None,
snapshot_id=None):
snapshot_id=None,
metadata=None):
"""Create a backup entry in the DB.
Return the entry ID
@ -105,6 +106,7 @@ class BaseBackupTest(test.TestCase):
kwargs['object_count'] = object_count
kwargs['temp_volume_id'] = temp_volume_id
kwargs['temp_snapshot_id'] = temp_snapshot_id
kwargs['metadata'] = metadata or {}
backup = objects.Backup(context=self.ctxt, **kwargs)
backup.create()
return backup

View File

@ -42,6 +42,7 @@ fake_backup = {
'snapshot_id': None,
'data_timestamp': None,
'restore_volume_id': None,
'backup_metadata': {},
}
vol_props = {'status': 'available', 'size': 1}
@ -65,8 +66,10 @@ class TestBackup(test_objects.BaseObjectsTestCase):
def test_get_by_id_no_existing_id(self, model_query):
query = mock.Mock()
filter_by = mock.Mock()
query_options = mock.Mock()
filter_by.first.return_value = None
query.filter_by.return_value = filter_by
query_options.filter_by.return_value = filter_by
query.options.return_value = query_options
model_query.return_value = query
self.assertRaises(exception.BackupNotFound, objects.Backup.get_by_id,
self.context, 123)
@ -87,6 +90,20 @@ class TestBackup(test_objects.BaseObjectsTestCase):
backup_update.assert_called_once_with(self.context, backup.id,
{'display_name': 'foobar'})
    @mock.patch('cinder.db.backup_metadata_update',
                return_value={'key1': 'value1'})
    @mock.patch('cinder.db.backup_update')
    def test_save_with_metadata(self, backup_update, metadata_update):
        """save() must route metadata through backup_metadata_update."""
        backup = objects.Backup._from_db_object(
            self.context, objects.Backup(), fake_backup)

        backup.metadata = {'key1': 'value1'}
        self.assertEqual({'metadata': {'key1': 'value1'}},
                         backup.obj_get_changes())
        backup.save()
        # delete=True means the stored metadata set is fully replaced.
        metadata_update.assert_called_once_with(self.context, backup.id,
                                                {'key1': 'value1'}, True)
@mock.patch('oslo_utils.timeutils.utcnow', return_value=timeutils.utcnow())
@mock.patch('cinder.db.sqlalchemy.api.backup_destroy')
def test_destroy(self, backup_destroy, utcnow_mock):
@ -121,6 +138,11 @@ class TestBackup(test_objects.BaseObjectsTestCase):
restore_volume_id='2')
self.assertEqual('2', backup.restore_volume_id)
    def test_obj_field_metadata(self):
        # Metadata passed at construction is exposed via the field as-is.
        backup = objects.Backup(context=self.context,
                                metadata={'test_key': 'test_value'})
        self.assertEqual({'test_key': 'test_value'}, backup.metadata)
def test_import_record(self):
utils.replace_obj_loader(self, objects.Backup)
backup = objects.Backup(context=self.context, id=fake.BACKUP_ID,
@ -222,10 +244,7 @@ class TestBackupList(test_objects.BaseObjectsTestCase):
@mock.patch('cinder.db.backup_get_all_by_host',
return_value=[fake_backup])
def test_get_all_by_host(self, get_all_by_host):
fake_volume_obj = fake_volume.fake_volume_obj(self.context)
backups = objects.BackupList.get_all_by_host(self.context,
fake_volume_obj.id)
backups = objects.BackupList.get_all_by_host(self.context, "fake_host")
self.assertEqual(1, len(backups))
TestBackup._compare(self, fake_backup, backups[0])

View File

@ -23,9 +23,9 @@ from cinder import test
# NOTE: The hashes in this list should only be changed if they come with a
# corresponding version bump in the affected objects.
object_data = {
'Backup': '1.4-c50f7a68bb4c400dd53dd219685b3992',
'Backup': '1.5-3ab4b305bd43ec0cff6701fe2a849194',
'BackupDeviceInfo': '1.0-74b3950676c690538f4bc6796bd0042e',
'BackupImport': '1.4-c50f7a68bb4c400dd53dd219685b3992',
'BackupImport': '1.5-3ab4b305bd43ec0cff6701fe2a849194',
'BackupList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
'CleanupRequest': '1.0-e7c688b893e1d5537ccf65cc3eb10a28',
'Cluster': '1.1-e2c533eb8cdd8d229b6c45c6cf3a9e2c',
@ -126,7 +126,9 @@ class TestObjectVersions(test.TestCase):
# the converted data, but at least ensures the method doesn't blow
# up on something simple.
init_args = {}
init_kwargs = {objects.Snapshot: {'context': 'ctxt'}}
init_kwargs = {objects.Snapshot: {'context': 'ctxt'},
objects.Backup: {'context': 'ctxt'},
objects.BackupImport: {'context': 'ctxt'}}
checker = fixture.ObjectVersionChecker(
base.CinderObjectRegistry.obj_classes())
checker.test_compatibility_routines(init_args=init_args,

View File

@ -552,6 +552,7 @@ class TestCinderManageCmd(test.TestCase):
'size': 123,
'object_count': 1,
'volume_id': fake.VOLUME_ID,
'backup_metadata': {},
}
backup_get_all.return_value = [backup]
with mock.patch('sys.stdout', new=six.StringIO()) as fake_out:
@ -605,6 +606,7 @@ class TestCinderManageCmd(test.TestCase):
'size': 123,
'object_count': 1,
'volume_id': fake.VOLUME_ID,
'backup_metadata': {},
}
backup_get_by_host.return_value = [backup]
backup_cmds = cinder_manage.BackupCommands()

View File

@ -2516,7 +2516,7 @@ class DBAPIBackupTestCase(BaseTest):
"""Tests for db.api.backup_* methods."""
_ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
'updated_at', 'data_timestamp']
'updated_at', 'data_timestamp', 'backup_metadata']
def setUp(self):
super(DBAPIBackupTestCase, self).setUp()

View File

@ -1261,6 +1261,30 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
messages = db_utils.get_table(engine, 'messages')
self.assertEqual(255, messages.c.project_id.type.length)
    def _check_105(self, engine, data):
        """Verify migration 105 created backup_metadata with the right schema."""
        self.assertTrue(engine.dialect.has_table(engine.connect(),
                                                 "backup_metadata"))
        backup_metadata = db_utils.get_table(engine, 'backup_metadata')

        self.assertIsInstance(backup_metadata.c.created_at.type,
                              self.TIME_TYPE)
        self.assertIsInstance(backup_metadata.c.updated_at.type,
                              self.TIME_TYPE)
        self.assertIsInstance(backup_metadata.c.deleted_at.type,
                              self.TIME_TYPE)
        self.assertIsInstance(backup_metadata.c.deleted.type,
                              self.BOOL_TYPE)
        self.assertIsInstance(backup_metadata.c.id.type,
                              self.INTEGER_TYPE)
        self.assertIsInstance(backup_metadata.c.key.type,
                              self.VARCHAR_TYPE)
        self.assertIsInstance(backup_metadata.c.value.type,
                              self.VARCHAR_TYPE)
        self.assertIsInstance(backup_metadata.c.backup_id.type,
                              self.VARCHAR_TYPE)
        # backup_id must be the sole foreign key (references backups.id).
        f_keys = self.get_foreign_key_columns(engine, 'backup_metadata')
        self.assertEqual({'backup_id'}, f_keys)
def test_walk_versions(self):
self.walk_versions(False, False)
self.assert_each_foreign_key_is_part_of_an_index()

View File

@ -344,6 +344,7 @@ def create_backup(ctxt,
container=None,
availability_zone=None,
host=None,
metadata=None,
**kwargs):
"""Create a backup object."""
values = {
@ -363,7 +364,8 @@ def create_backup(ctxt,
'temp_volume_id': temp_volume_id,
'temp_snapshot_id': temp_snapshot_id,
'snapshot_id': snapshot_id,
'data_timestamp': data_timestamp, }
'data_timestamp': data_timestamp,
'metadata': metadata or {}, }
values.update(kwargs)
backup = objects.Backup(ctxt, **values)