Merge "Improve quota usage for temporary resources"
commit ea5805255c
@@ -147,13 +147,21 @@ class DbCommands(object):
     # NOTE: Online migrations cannot depend on having Cinder services running.
     # Migrations can be called during Fast-Forward Upgrades without having any
     # Cinder services up.
-    # NOTE; Online migrations must be removed at the beginning of the next
+    # NOTE: Online migrations must be removed at the beginning of the next
     # release to the one they've been introduced. A comment with the release
     # a migration is introduced and the one where it must be removed must
     # precede any element of the "online_migrations" tuple, like this:
     #    # Added in Queens remove in Rocky
     #    db.service_uuids_online_data_migration,
-    online_migrations = tuple()
+    online_migrations = (
+        # TODO: (Z Release) Remove next line and this comment
+        # TODO: (Y Release) Uncomment next line and remove this comment
+        # db.remove_temporary_admin_metadata_data_migration,
+
+        # TODO: (Y Release) Remove next 2 lines and this comment
+        db.volume_use_quota_online_data_migration,
+        db.snapshot_use_quota_online_data_migration,
+    )

     def __init__(self):
         pass
@@ -1940,3 +1940,19 @@ def conditional_update(context, model, values, expected_values, filters=(),
     return IMPL.conditional_update(context, model, values, expected_values,
                                    filters, include_deleted, project_only,
                                    order)
+
+
+# TODO: (Y Release) remove method and this comment
+def volume_use_quota_online_data_migration(context, max_count):
+    return IMPL.volume_use_quota_online_data_migration(context, max_count)
+
+
+# TODO: (Y Release) remove method and this comment
+def snapshot_use_quota_online_data_migration(context, max_count):
+    return IMPL.snapshot_use_quota_online_data_migration(context, max_count)
+
+
+# TODO: (Z Release) remove method and this comment
+# TODO: (Y Release) uncomment method
+# def remove_temporary_admin_metadata_data_migration(context, max_count):
+#     return IMPL.remove_temporary_admin_metadata_data_migration(context,
+#                                                                max_count)
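These wrappers follow cinder's usual db.api pattern: every public function is a thin pass-through to a lazily loaded backend implementation. A rough sketch of that dispatch wiring, assuming the standard oslo.db loader (simplified; it is not part of this commit):

# Assumed dispatch wiring (simplified, not commit code): cinder/db/api.py
# builds IMPL once through oslo.db, so the wrappers above stay
# backend-agnostic and callers never import the SQLAlchemy layer directly.
from oslo_config import cfg
from oslo_db import api as oslo_db_api

_BACKEND_MAPPING = {'sqlalchemy': 'cinder.db.sqlalchemy.api'}
IMPL = oslo_db_api.DBAPI.from_config(conf=cfg.CONF,
                                     backend_mapping=_BACKEND_MAPPING,
                                     lazy=True)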
@@ -1592,10 +1592,13 @@ def _volume_data_get_for_project(context, project_id, volume_type_id=None,
     # Also skip temporary volumes that have 'temporary' admin_metadata key set
     # to True.
     if skip_internal:
+        # TODO: (Y release) replace everything inside this if with:
+        # query = query.filter(model.use_quota)
         admin_model = models.VolumeAdminMetadata
         query = query.filter(
             and_(or_(model.migration_status.is_(None),
                      ~model.migration_status.startswith('target:')),
+                 ~model.use_quota.is_(False),
                  ~sql.exists().where(and_(model.id == admin_model.volume_id,
                                           ~admin_model.deleted,
                                           admin_model.key == 'temporary',
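The double negative in ~model.use_quota.is_(False) is deliberate: rows created before this upgrade still carry use_quota = NULL until the online migration backfills them, and those rows must keep counting toward quota. A minimal standalone sketch of the SQL the two candidate filters produce (illustrative, not commit code):

# Sketch: compare the SQL emitted by the two candidate filters.
import sqlalchemy as sa

volumes = sa.table('volumes', sa.column('use_quota', sa.Boolean))

# "volumes.use_quota IS NOT false" -- matches True AND NULL (legacy) rows.
print(~volumes.c.use_quota.is_(False))

# "volumes.use_quota" (plain boolean test) would skip the NULL rows, which
# is why the plain filter is deferred to the Y release in the TODO above.
print(volumes.c.use_quota)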
@@ -3271,13 +3274,19 @@ def snapshot_get_all_by_project(context, project_id, filters=None, marker=None,

 @require_context
 def _snapshot_data_get_for_project(context, project_id, volume_type_id=None,
-                                   session=None, host=None):
+                                   session=None, host=None,
+                                   skip_internal=True):
     authorize_project_context(context, project_id)
     query = model_query(context,
                         func.count(models.Snapshot.id),
                         func.sum(models.Snapshot.volume_size),
                         read_deleted="no",
                         session=session)
+    if skip_internal:
+        # TODO: (Y release) replace next line with:
+        # query = query.filter(models.Snapshot.use_quota)
+        query = query.filter(~models.Snapshot.use_quota.is_(False))
+
     if volume_type_id or host:
         query = query.join('volume')
         if volume_type_id:
@@ -3294,8 +3303,11 @@ def _snapshot_data_get_for_project(context, project_id, volume_type_id=None,
 @require_context
 def snapshot_data_get_for_project(context, project_id,
                                   volume_type_id=None, host=None):
+    # This method doesn't support filtering temporary resources (use_quota
+    # field) and defaults to returning all snapshots because all callers (quota
+    # sync methods and os-host API extension) require all the snapshots.
     return _snapshot_data_get_for_project(context, project_id, volume_type_id,
-                                          host=host)
+                                          host=host, skip_internal=False)


 @require_context
@@ -7377,3 +7389,67 @@ def conditional_update(context, model, values, expected_values, filters=(),
     # Return True if we were able to change any DB entry, False otherwise
     result = query.update(values, **update_args)
     return 0 != result
+
+
+# TODO: (Y Release) remove method and this comment
+@enginefacade.writer
+def volume_use_quota_online_data_migration(context, max_count):
+    def calculate_use_quota(volume):
+        return not (volume.migration_status.startswith('target:') or
+                    volume.admin_metadata.get('temporary') == 'True')
+
+    return use_quota_online_data_migration(context, max_count, 'Volume',
+                                           calculate_use_quota)
+
+
+# TODO: (Y Release) remove method and this comment
+@enginefacade.writer
+def snapshot_use_quota_online_data_migration(context, max_count):
+    # Temp snapshots are created in
+    # - cinder.volume.manager.VolumeManager._create_backup_snapshot
+    # - cinder.volume.driver.BaseVD._create_temp_snapshot
+    #
+    # But we don't have a "good" way to know which ones are temporary as the
+    # only identification is the display_name that can be "forged" by users.
+    # Most users are not doing rolling upgrades so we'll assume there are no
+    # temporary snapshots, not even snapshots with display_name:
+    # - '[revert] volume %s backup snapshot' % resource.volume_id
+    # - 'backup-snap-%s' % resource.volume_id
+    return use_quota_online_data_migration(context, max_count, 'Snapshot',
+                                           lambda snapshot: True)
+
+
+# TODO: (Y Release) remove method and this comment
+@enginefacade.writer
+def use_quota_online_data_migration(context, max_count,
+                                    resource_name, calculate_use_quota):
+    updated = 0
+    session = get_session()
+    with session.begin():
+        query = model_query(context,
+                            getattr(models, resource_name),
+                            session=session).filter_by(use_quota=None)
+        total = query.count()
+        resources = query.limit(max_count).with_for_update().all()
+        for resource in resources:
+            resource.use_quota = calculate_use_quota(resource)
+            updated += 1
+
+    return total, updated
+
+
+# TODO: (Z Release) remove method and this comment
+# TODO: (Y Release) uncomment method
+# @enginefacade.writer
+# def remove_temporary_admin_metadata_data_migration(context, max_count):
+#     session = get_session()
+#     with session.begin():
+#         query = model_query(context,
+#                             models.VolumeAdminMetadata,
+#                             session=session).filter_by(key='temporary')
+#         total = query.count()
+#         updated = query.limit(max_count).update(
+#             models.VolumeAdminMetadata.delete_values)
+#
+#     return total, updated
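Each migration returns (total, updated), where total counts the rows that still had use_quota = None before the batch ran. A hypothetical driver loop showing how such a function is typically exercised in bounded batches, assuming an admin RequestContext in ctxt (a sketch, not how cinder-manage is literally implemented):

# Illustrative runner (not commit code): call the migration in bounded
# batches until no rows with use_quota=None remain.
def run_until_complete(migration, ctxt, batch_size=50):
    while True:
        total, updated = migration(ctxt, batch_size)
        if updated == 0:
            # Nothing left to backfill (or nothing could be updated).
            return total

# Example (hypothetical): run_until_complete(
#     volume_use_quota_online_data_migration, admin_ctxt)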
@@ -0,0 +1,34 @@
+# Copyright 2021 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import sqlalchemy as sa
+
+
+def upgrade(migrate_engine):
+    """Update volumes and snapshots tables with use_quota field.
+
+    Add use_quota field to both volumes and snapshots table to quickly and
+    easily identify resources that must be counted for quota usages.
+    """
+    # Existing resources will be left with None value to allow rolling upgrades
+    # with the online data migration pattern, since they will identify the
+    # resources that don't have the field set/known yet.
+    meta = sa.MetaData(bind=migrate_engine)
+    for table_name in ('volumes', 'snapshots'):
+        table = sa.Table(table_name, meta, autoload=True)
+
+        if not hasattr(table.c, 'use_quota'):
+            column = sa.Column('use_quota', sa.Boolean, nullable=True)
+            table.create_column(column)
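The TODOs scattered through this commit imply a follow-up schema migration in the Y release, once the online migration has backfilled every NULL. A hypothetical version of that follow-up in the same sqlalchemy-migrate style as the file above (an assumption; it is not part of this commit):

# Hypothetical Y-release follow-up migration (assumed, not in this commit).
import sqlalchemy as sa

def upgrade(migrate_engine):
    meta = sa.MetaData(bind=migrate_engine)
    for table_name in ('volumes', 'snapshots'):
        table = sa.Table(table_name, meta, autoload=True)
        # Backfill any stragglers the online migration missed, then make
        # the column non-nullable as the models' TODOs require.
        table.update().where(
            table.c.use_quota.is_(None)).values(use_quota=True).execute()
        table.c.use_quota.alter(nullable=False)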
@@ -252,6 +252,9 @@ class Volume(BASE, CinderBase):

     id = sa.Column(sa.String(36), primary_key=True)
     _name_id = sa.Column(sa.String(36))  # Don't access/modify this directly!
+    # TODO: (Y release) Change nullable to False
+    use_quota = sa.Column(sa.Boolean, nullable=True, default=True,
+                          doc='Ignore volume in quota usage')

     @property
     def name_id(self):
@@ -755,6 +758,9 @@ class Snapshot(BASE, CinderBase):
     """Represents a snapshot of volume."""
     __tablename__ = 'snapshots'
     id = sa.Column(sa.String(36), primary_key=True)
+    # TODO: (Y release) Change nullable to False
+    use_quota = sa.Column(sa.Boolean, nullable=True, default=True,
+                          doc='Ignore snapshot in quota usage')

     @property
     def name(self):
@@ -823,6 +829,7 @@ class Backup(BASE, CinderBase):
     """Represents a backup of a volume to Swift."""
     __tablename__ = 'backups'
     id = sa.Column(sa.String(36), primary_key=True)
+    # Backups don't have use_quota field since we don't have temporary backups

     @property
     def name(self):
@@ -135,6 +135,11 @@ OBJ_VERSIONS = CinderObjectVersionsHistory()
 # '1.35' and the self['<versioname>'] = { to self['1.35'] = {


+# TODO: (Z release) remove up to next TODO and update
+# CinderObjectVersionsHistory (was added in X release)
+OBJ_VERSIONS.add('1.39', {'Volume': '1.9', 'Snapshot': '1.6'})
+
+
 class CinderObjectRegistry(base.VersionedObjectRegistry):
     def registration_hook(self, cls, index):
         """Hook called when registering a class.
@@ -13,6 +13,7 @@
 #    under the License.

 from oslo_config import cfg
+from oslo_utils import versionutils
 from oslo_versionedobjects import fields

 from cinder import db
@@ -38,7 +39,8 @@ class Snapshot(cleanable.CinderCleanableObject, base.CinderObject,
     # Version 1.3: SnapshotStatusField now includes "unmanaging"
     # Version 1.4: SnapshotStatusField now includes "backing-up"
     # Version 1.5: SnapshotStatusField now includes "restoring"
-    VERSION = '1.5'
+    # Version 1.6: Added use_quota
+    VERSION = '1.6'

     # NOTE(thangp): OPTIONAL_FIELDS are fields that would be lazy-loaded. They
     # are typically the relationship in the sqlalchemy object.
@@ -51,6 +53,8 @@ class Snapshot(cleanable.CinderCleanableObject, base.CinderObject,
         'user_id': fields.StringField(nullable=True),
         'project_id': fields.StringField(nullable=True),

+        # TODO: (Y release) Change nullable to False
+        'use_quota': fields.BooleanField(default=True, nullable=True),
         'volume_id': fields.UUIDField(nullable=True),
         'cgsnapshot_id': fields.UUIDField(nullable=True),
         'group_snapshot_id': fields.UUIDField(nullable=True),
@@ -109,6 +113,15 @@ class Snapshot(cleanable.CinderCleanableObject, base.CinderObject,
         self._orig_metadata = (dict(self.metadata)
                                if self.obj_attr_is_set('metadata') else {})

+    # TODO: (Y release) remove method
+    @classmethod
+    def _obj_from_primitive(cls, context, objver, primitive):
+        primitive['versioned_object.data'].setdefault('use_quota', True)
+        obj = super(Snapshot, Snapshot)._obj_from_primitive(context, objver,
+                                                            primitive)
+        obj._reset_metadata_tracking()
+        return obj
+
     def obj_what_changed(self):
         changes = super(Snapshot, self).obj_what_changed()
         if hasattr(self, 'metadata') and self.metadata != self._orig_metadata:
@@ -116,6 +129,14 @@ class Snapshot(cleanable.CinderCleanableObject, base.CinderObject,

         return changes

+    def obj_make_compatible(self, primitive, target_version):
+        """Make a Snapshot representation compatible with a target version."""
+        super(Snapshot, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+        # TODO: (Y release) remove next 2 lines & method if nothing else below
+        if target_version < (1, 6):
+            primitive.pop('use_quota', None)
+
     @classmethod
     def _from_db_object(cls, context, snapshot, db_snapshot,
                         expected_attrs=None):
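Together, obj_make_compatible() and _obj_from_primitive() make the new field safe during rolling upgrades: the sender strips use_quota when talking to a service pinned below 1.6, and the receiver restores a conservative default so old peers keep counting the snapshot toward quota. A toy round trip, as a standalone illustration rather than cinder code:

# Toy model of the downgrade/rehydration pair above (not cinder code).
def make_compatible(primitive, target_version):
    if target_version < (1, 6):
        primitive.pop('use_quota', None)

def from_primitive(data):
    data.setdefault('use_quota', True)
    return data

wire = {'id': 'snap-1', 'use_quota': False}
make_compatible(wire, (1, 5))           # serialize for an old service
assert 'use_quota' not in wire          # field dropped on the wire
assert from_primitive(wire)['use_quota'] is True  # safe default restored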
@@ -178,6 +199,8 @@ class Snapshot(cleanable.CinderCleanableObject, base.CinderObject,
             updates['volume_type_id'] = (
                 volume_types.get_default_volume_type()['id'])

+        # TODO: (Y release) remove setting use_quota default, it's set by ORM
+        updates.setdefault('use_quota', True)
         db_snapshot = db.snapshot_create(self._context, updates)
         self._from_db_object(self._context, self, db_snapshot)
@@ -13,6 +13,8 @@
 #    under the License.

 from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import versionutils
 from oslo_versionedobjects import fields

 from cinder import db
@@ -26,6 +28,8 @@ from cinder.volume import volume_types

 CONF = cfg.CONF

+LOG = logging.getLogger(__name__)
+

 class MetadataObject(dict):
     # This is a wrapper class that simulates SQLAlchemy (.*)Metadata objects to
@@ -62,7 +66,8 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject,
     # Version 1.6: This object is now cleanable (adds rows to workers table)
     # Version 1.7: Added service_uuid
     # Version 1.8: Added shared_targets
-    VERSION = '1.8'
+    # Version 1.9: Added use_quota
+    VERSION = '1.9'

     OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata',
                        'volume_type', 'volume_attachment', 'consistencygroup',
@@ -76,6 +81,8 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject,
         'user_id': fields.StringField(nullable=True),
         'project_id': fields.StringField(nullable=True),

+        # TODO: (Y release) Change nullable to False
+        'use_quota': fields.BooleanField(default=True, nullable=True),
         'snapshot_id': fields.UUIDField(nullable=True),

         'cluster_name': fields.StringField(nullable=True),
@@ -208,6 +215,8 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject,

     @classmethod
     def _obj_from_primitive(cls, context, objver, primitive):
+        # TODO: (Y release) remove next line
+        cls._ensure_use_quota_is_set(primitive['versioned_object.data'])
         obj = super(Volume, Volume)._obj_from_primitive(context, objver,
                                                         primitive)
         obj._reset_metadata_tracking()
@@ -239,6 +248,14 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject,

         return changes

+    def obj_make_compatible(self, primitive, target_version):
+        """Make a Volume representation compatible with a target version."""
+        super(Volume, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+        # TODO: (Y release) remove next 2 lines & method if nothing else below
+        if target_version < (1, 9):
+            primitive.pop('use_quota', None)
+
     @classmethod
     def _from_db_object(cls, context, volume, db_volume, expected_attrs=None):
         if expected_attrs is None:
@@ -312,6 +329,20 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject,
         volume.obj_reset_changes()
         return volume

+    # TODO: (Z release): Remove method and leave the default of False from DB
+    @staticmethod
+    def _ensure_use_quota_is_set(updates, warning=False):
+        if updates.get('use_quota') is None:
+            use_quota = not (
+                (updates.get('migration_status') or ''
+                 ).startswith('target:') or
+                (updates.get('admin_metadata') or {}
+                 ).get('temporary') == 'True')
+            if warning and not use_quota:
+                LOG.warning('Ooooops, we forgot to set the use_quota field to '
+                            'False!! Fix code here')
+            updates['use_quota'] = use_quota
+
     def create(self):
         if self.obj_attr_is_set('id'):
             raise exception.ObjectActionError(action='create',
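The heuristic is easier to read outside the parenthesized form: a volume is exempt from quota only when it is a migration destination or a legacy temporary volume flagged through admin metadata. A standalone rendition, as an illustration only:

# Equivalent logic to _ensure_use_quota_is_set() (illustrative rewrite,
# not an import from the commit).
def calculate_use_quota(updates):
    migrating = (updates.get('migration_status') or '').startswith('target:')
    temporary = (updates.get('admin_metadata') or {}).get('temporary') == 'True'
    return not (migrating or temporary)

assert calculate_use_quota({}) is True
assert calculate_use_quota({'migration_status': 'target:123'}) is False
assert calculate_use_quota({'admin_metadata': {'temporary': 'True'}}) is False
assert calculate_use_quota({'migration_status': 'migrating'}) is True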
@@ -335,11 +366,19 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject,
             updates['volume_type_id'] = (
                 volume_types.get_default_volume_type()['id'])

+        # TODO: (Y release) Remove this call since we should have already made
+        # all methods in Cinder make the call with the right values.
+        self._ensure_use_quota_is_set(updates, warning=True)
+
         db_volume = db.volume_create(self._context, updates)
         expected_attrs = self._get_expected_attrs(self._context)
         self._from_db_object(self._context, self, db_volume, expected_attrs)

     def save(self):
+        # TODO: (Y release) Remove this online migration code
+        # Pass self directly since it's a CinderObjectDictCompat
+        self._ensure_use_quota_is_set(self)
+
         updates = self.cinder_obj_get_changes()
         if updates:
             # NOTE(xyang): Allow this to pass if 'consistencygroup' is
@@ -474,7 +513,7 @@ class Volume(cleanable.CinderCleanableObject, base.CinderObject,
         # We swap fields between source (i.e. self) and destination at the
         # end of migration because we want to keep the original volume id
         # in the DB but now pointing to the migrated volume.
-        skip = ({'id', 'provider_location', 'glance_metadata',
+        skip = ({'id', 'provider_location', 'glance_metadata', 'use_quota',
                  'volume_type'} | set(self.obj_extra_fields))
         for key in set(dest_volume.fields.keys()) - skip:
             # Only swap attributes that are already set. We do not want to
@@ -200,6 +200,14 @@ class MigrationsMixin(test_migrations.WalkVersionsMixin):
         self.assertFalse(snap_table.c.volume_type_id.nullable)
         self.assertFalse(encrypt_table.c.volume_type_id.nullable)

+    def _check_145(self, engine, data):
+        """Test add use_quota columns."""
+        for name in ('volumes', 'snapshots'):
+            resources = db_utils.get_table(engine, name)
+            self.assertIn('use_quota', resources.c)
+            # TODO: (Y release) Alter in new migration & change to assertFalse
+            self.assertTrue(resources.c.use_quota.nullable)
+
     # NOTE: this test becomes slower with each addition of new DB migration.
     # 'pymysql' works much slower on slow nodes than 'psycopg2'. And such
     # timeout mostly required for testing of 'mysql' backend.
@@ -45,9 +45,9 @@ object_data = {
     'RequestSpec': '1.5-2f6efbb86107ee70cc1bb07f4bdb4ec7',
     'Service': '1.6-e881b6b324151dd861e09cdfffcdaccd',
     'ServiceList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
-    'Snapshot': '1.5-ac1cdbd5b89588f6a8f44afdf6b8b201',
+    'Snapshot': '1.6-a2a1b62ae7e8d2794359ae59aff47ff6',
     'SnapshotList': '1.0-15ecf022a68ddbb8c2a6739cfc9f8f5e',
-    'Volume': '1.8-6cf615b72269cef48702a2a5c2940159',
+    'Volume': '1.9-4e25e166fa38bfcf039dcac1b19465b1',
     'VolumeList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
     'VolumeAttachment': '1.3-e6a3f7c5590d19f1e3ff6f819fbe6593',
     'VolumeAttachmentList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
@@ -22,6 +22,7 @@ import pytz
 from cinder.db.sqlalchemy import models
 from cinder import exception
 from cinder import objects
+from cinder.objects import base as ovo_base
 from cinder.objects import fields
 from cinder.tests.unit import fake_constants as fake
 from cinder.tests.unit import fake_snapshot
@@ -229,6 +230,17 @@ class TestSnapshot(test_objects.BaseObjectsTestCase):
                                 mock.call(self.context,
                                           fake.SNAPSHOT_ID)])

+    @ddt.data('1.38', '1.39')
+    def test_obj_make_compatible_use_quota_added(self, version):
+        snapshot = objects.Snapshot(self.context, use_quota=False)
+
+        serializer = ovo_base.CinderObjectSerializer(version)
+        primitive = serializer.serialize_entity(self.context, snapshot)
+
+        converted_snapshot = objects.Snapshot.obj_from_primitive(primitive)
+        expected = version != '1.39'
+        self.assertIs(expected, converted_snapshot.use_quota)
+

 class TestSnapshotList(test_objects.BaseObjectsTestCase):
     @mock.patch('cinder.objects.volume.Volume.get_by_id')
@@ -21,6 +21,7 @@ import pytz
 from cinder import context
 from cinder import exception
 from cinder import objects
+from cinder.objects import base as ovo_base
 from cinder.objects import fields
 from cinder.tests.unit.consistencygroup import fake_consistencygroup
 from cinder.tests.unit import fake_constants as fake
@@ -54,25 +55,68 @@ class TestVolume(test_objects.BaseObjectsTestCase):
                           objects.Volume.get_by_id, self.context, 123)

     @mock.patch('cinder.db.volume_create')
-    def test_create(self, volume_create):
+    # TODO: (Y release) remove ddt.data and ddt.unpack decorators
+    @ddt.data(
+        ({}, True),  # default value
+        ({'use_quota': True}, True),  # Normal init
+        ({'use_quota': False}, False),
+        ({'migration_status': 'target:'}, False),  # auto detect migrating
+        ({'migration_status': 'migrating:'}, True),  # auto detect normal
+        ({'admin_metadata': {'temporary': True}}, False),  # temp
+        ({'admin_metadata': {'something': True}}, True),  # normal
+    )
+    @ddt.unpack
+    def test_create(self, ovo, expected, volume_create):
         db_volume = fake_volume.fake_db_volume()
         volume_create.return_value = db_volume
-        volume = objects.Volume(context=self.context)
+        volume = objects.Volume(context=self.context, **ovo)
         volume.create()
         self.assertEqual(db_volume['id'], volume.id)

+        use_quota = volume_create.call_args[0][1]['use_quota']
+        # TODO: (Y release) remove next line
+        self.assertIs(expected, use_quota)
+
     @mock.patch('cinder.db.volume_update')
-    @ddt.data(False, True)
-    def test_save(self, test_cg, volume_update):
-        db_volume = fake_volume.fake_db_volume()
+    # TODO: (Y release) replace ddt.data and ddt.unpack decorators with
+    # @ddt.data(False, True)
+    @ddt.data(
+        (False, {}, True),
+        (True, {}, True),
+        (False, {'use_quota': True}, True),
+        (False, {'use_quota': False}, False),
+        (False, {'migration_status': 'target:'}, False),
+        (False, {'migration_status': 'migrating:'}, True),
+        (False,
+         {'volume_admin_metadata': [{'key': 'temporary', 'value': True}]},
+         False),
+        (False,
+         {'volume_admin_metadata': [{'key': 'something', 'value': True}]},
+         True),
+    )
+    @ddt.unpack
+    def test_save(self, test_cg, ovo, expected, volume_update):
+        use_quota = ovo.pop('use_quota', None)
+        db_volume = fake_volume.fake_db_volume(**ovo)
+        # TODO: (Y release) remove expected_attrs
+        if 'volume_admin_metadata' in ovo:
+            expected_attrs = ['admin_metadata']
+        else:
+            expected_attrs = []
         volume = objects.Volume._from_db_object(self.context,
-                                                objects.Volume(), db_volume)
+                                                objects.Volume(), db_volume,
+                                                expected_attrs=expected_attrs)
         volume.display_name = 'foobar'
         if test_cg:
             volume.consistencygroup = None
+        # TODO: (Y release) remove next 2 lines
+        if use_quota is not None:
+            volume.use_quota = use_quota
         volume.save()
+        # TODO: (Y release) remove use_quota
         volume_update.assert_called_once_with(self.context, volume.id,
-                                              {'display_name': 'foobar'})
+                                              {'display_name': 'foobar',
+                                               'use_quota': expected})

     def test_save_error(self):
         db_volume = fake_volume.fake_db_volume()
@@ -97,8 +141,10 @@ class TestVolume(test_objects.BaseObjectsTestCase):
                           'metadata': {'key1': 'value1'}},
                          volume.obj_get_changes())
         volume.save()
+        # TODO: (Y release) remove use_quota
         volume_update.assert_called_once_with(self.context, volume.id,
-                                              {'display_name': 'foobar'})
+                                              {'display_name': 'foobar',
+                                               'use_quota': True})
         metadata_update.assert_called_once_with(self.context, volume.id,
                                                 {'key1': 'value1'}, True)
@@ -388,12 +434,14 @@ class TestVolume(test_objects.BaseObjectsTestCase):
     def test_finish_volume_migration(self, volume_update, metadata_update,
                                      src_vol_type_id, dest_vol_type_id):
         src_volume_db = fake_volume.fake_db_volume(
-            **{'id': fake.VOLUME_ID, 'volume_type_id': src_vol_type_id})
+            **{'id': fake.VOLUME_ID, 'volume_type_id': src_vol_type_id,
+               'use_quota': True})
         if src_vol_type_id:
             src_volume_db['volume_type'] = fake_volume.fake_db_volume_type(
                 id=src_vol_type_id)
         dest_volume_db = fake_volume.fake_db_volume(
-            **{'id': fake.VOLUME2_ID, 'volume_type_id': dest_vol_type_id})
+            **{'id': fake.VOLUME2_ID, 'volume_type_id': dest_vol_type_id,
+               'use_quota': False})
         if dest_vol_type_id:
             dest_volume_db['volume_type'] = fake_volume.fake_db_volume_type(
                 id=dest_vol_type_id)
@@ -424,13 +472,16 @@ class TestVolume(test_objects.BaseObjectsTestCase):
         # finish_volume_migration
         ignore_keys = ('id', 'provider_location', '_name_id',
                        'migration_status', 'display_description', 'status',
-                       'volume_glance_metadata', 'volume_type')
+                       'volume_glance_metadata', 'volume_type', 'use_quota')

         dest_vol_dict = {k: updated_dest_volume[k] for k in
                          updated_dest_volume.keys() if k not in ignore_keys}
         src_vol_dict = {k: src_volume[k] for k in src_volume.keys()
                         if k not in ignore_keys}
         self.assertEqual(src_vol_dict, dest_vol_dict)
+        # use_quota must not have been switched, we'll mess our quota otherwise
+        self.assertTrue(src_volume.use_quota)
+        self.assertFalse(updated_dest_volume.use_quota)

     def test_volume_with_metadata_serialize_deserialize_no_changes(self):
         updates = {'volume_glance_metadata': [{'key': 'foo', 'value': 'bar'}],
@@ -444,7 +495,7 @@ class TestVolume(test_objects.BaseObjectsTestCase):
     @mock.patch('cinder.db.volume_admin_metadata_update')
     @mock.patch('cinder.db.sqlalchemy.api.volume_attach')
     def test_begin_attach(self, volume_attach, metadata_update):
-        volume = fake_volume.fake_volume_obj(self.context)
+        volume = fake_volume.fake_volume_obj(self.context, use_quota=True)
         db_attachment = fake_volume.volume_attachment_db_obj(
             volume_id=volume.id,
             attach_status=fields.VolumeAttachStatus.ATTACHING)
@@ -555,6 +606,29 @@ class TestVolume(test_objects.BaseObjectsTestCase):
                                              migration_status=migration_status)
         self.assertIs(expected, volume.is_migration_target())

+    @ddt.data(
+        # We could lose value during rolling upgrade if we added a new temp
+        # type in this upgrade and didn't take it into consideration
+        ('1.38', {'use_quota': False}, True),
+        # On rehydration we auto calculate use_quota value if not present
+        ('1.38', {'migration_status': 'target:123'}, False),
+        # Both versions in X
+        ('1.39', {'use_quota': True}, True),
+        # In X we don't recalculate, since we transmit the field
+        ('1.39', {'migration_status': 'target:123', 'use_quota': True}, True),
+    )
+    @ddt.unpack
+    def test_obj_make_compatible_use_quota_added(self, version, ovo, expected):
+        volume = objects.Volume(self.context, **ovo)
+
+        # When serializing to v1.38 we'll lose the use_quota value so it will
+        # be recalculated based on the Volume values
+        serializer = ovo_base.CinderObjectSerializer(version)
+        primitive = serializer.serialize_entity(self.context, volume)
+
+        converted_volume = objects.Volume.obj_from_primitive(primitive)
+        self.assertIs(expected, converted_volume.use_quota)
+

 @ddt.ddt
 class TestVolumeList(test_objects.BaseObjectsTestCase):
@@ -314,7 +314,7 @@ class SchedulerManagerTestCase(test.TestCase):
         # Test NoValidBackend exception behavior for create_volume.
         # Puts the volume in 'error' state and eats the exception.
         _mock_sched_create.side_effect = exception.NoValidBackend(reason="")
-        volume = fake_volume.fake_volume_obj(self.context)
+        volume = fake_volume.fake_volume_obj(self.context, use_quota=True)
         request_spec = {'volume_id': volume.id,
                         'volume': {'id': volume.id, '_name_id': None,
                                    'metadata': {}, 'admin_metadata': {},
@@ -689,7 +689,7 @@ class SchedulerDriverModuleTestCase(test.TestCase):
     @mock.patch('cinder.db.volume_update')
     @mock.patch('cinder.objects.volume.Volume.get_by_id')
     def test_volume_host_update_db(self, _mock_volume_get, _mock_vol_update):
-        volume = fake_volume.fake_volume_obj(self.context)
+        volume = fake_volume.fake_volume_obj(self.context, use_quota=True)
         _mock_volume_get.return_value = volume

         driver.volume_update_db(self.context, volume.id, 'fake_host',
@@ -532,7 +532,7 @@ class DBAPIVolumeTestCase(BaseTest):
             skip_internal=False)

     @ddt.data((True, THREE_HUNDREDS, THREE),
-              (False, THREE_HUNDREDS + ONE_HUNDREDS, THREE + 1))
+              (False, THREE_HUNDREDS + 2 * ONE_HUNDREDS, THREE + 2))
     @ddt.unpack
     def test__volume_data_get_for_project_migrating(self, skip_internal,
                                                     gigabytes, count):
@@ -554,6 +554,12 @@ class DBAPIVolumeTestCase(BaseTest):
                                      'host': 'h-%d' % i,
                                      'volume_type_id': fake.VOLUME_TYPE_ID,
                                      'migration_status': 'target:vol-id'})
+        # This one will not be counted
+        db.volume_create(self.ctxt, {'project_id': 'project',
+                                     'size': ONE_HUNDREDS,
+                                     'host': 'h-%d' % i,
+                                     'volume_type_id': fake.VOLUME_TYPE_ID,
+                                     'use_quota': False})

         result = sqlalchemy_api._volume_data_get_for_project(
             self.ctxt, 'project', skip_internal=skip_internal)
@@ -2131,6 +2137,58 @@ class DBAPISnapshotTestCase(BaseTest):

         self.assertEqual(should_be, db.snapshot_metadata_get(self.ctxt, 1))

+    @ddt.data((True, (THREE, THREE_HUNDREDS)),
+              (False, (THREE + 1, THREE_HUNDREDS + ONE_HUNDREDS)))
+    @ddt.unpack
+    def test__snapshot_data_get_for_project_temp(self, skip_internal,
+                                                 expected):
+        vol = db.volume_create(self.ctxt,
+                               {'project_id': 'project', 'size': 1,
+                                'volume_type_id': fake.VOLUME_TYPE_ID})
+
+        # Normal snapshots are always counted
+        db.snapshot_create(
+            self.ctxt,
+            {'project_id': 'project',
+             'volume_id': vol.id,
+             'volume_type_id': vol.volume_type_id,
+             'display_name': 'user snapshot',
+             'volume_size': ONE_HUNDREDS})
+
+        # Old revert temp snapshots are counted, since display_name can be
+        # forged by users
+        db.snapshot_create(
+            self.ctxt,
+            {'project_id': 'project',
+             'volume_id': vol.id,
+             'volume_type_id': vol.volume_type_id,
+             'display_name': '[revert] volume 123 backup snapshot',
+             'volume_size': ONE_HUNDREDS})
+
+        # Old backup temp snapshots are counted, since display_name can be
+        # forged by users
+        db.snapshot_create(
+            self.ctxt,
+            {'project_id': 'project',
+             'volume_id': vol.id,
+             'volume_type_id': vol.volume_type_id,
+             'display_name': 'backup-snap-123',
+             'volume_size': ONE_HUNDREDS})
+
+        # This one will not be counted if skipping internal
+        db.snapshot_create(
+            self.ctxt,
+            {'project_id': 'project',
+             'volume_id': vol.id,
+             'volume_type_id': vol.volume_type_id,
+             'display_name': 'new type of temp snapshot',
+             'use_quota': False,
+             'volume_size': ONE_HUNDREDS})
+
+        result = sqlalchemy_api._snapshot_data_get_for_project(
+            self.ctxt, 'project', skip_internal=skip_internal)
+        self.assertEqual(expected, result)
+

 @ddt.ddt
 class DBAPIConsistencygroupTestCase(BaseTest):
@@ -3765,3 +3823,98 @@ class DBAPIGroupTestCase(BaseTest):
             self.assertEqual(
                 new_cluster_name + groups[i].cluster_name[len(cluster_name):],
                 db_groups[i].cluster_name)


class OnlineMigrationTestCase(BaseTest):
+    # TODO: (Y Release) remove method and this comment
+    @mock.patch.object(sqlalchemy_api,
+                       'snapshot_use_quota_online_data_migration')
+    def test_db_snapshot_use_quota_online_data_migration(self,
+                                                         migration_mock):
+        params = (mock.sentinel.ctxt, mock.sentinel.max_count)
+        db.snapshot_use_quota_online_data_migration(*params)
+        migration_mock.assert_called_once_with(*params)
+
+    # TODO: (Y Release) remove method and this comment
+    @mock.patch.object(sqlalchemy_api,
+                       'volume_use_quota_online_data_migration')
+    def test_db_volume_use_quota_online_data_migration(self, migration_mock):
+        params = (mock.sentinel.ctxt, mock.sentinel.max_count)
+        db.volume_use_quota_online_data_migration(*params)
+        migration_mock.assert_called_once_with(*params)
+
+    # TODO: (Y Release) remove method and this comment
+    @mock.patch.object(sqlalchemy_api, 'use_quota_online_data_migration')
+    def test_snapshot_use_quota_online_data_migration(self, migration_mock):
+        sqlalchemy_api.snapshot_use_quota_online_data_migration(
+            self.ctxt, mock.sentinel.max_count)
+        migration_mock.assert_called_once_with(self.ctxt,
+                                               mock.sentinel.max_count,
+                                               'Snapshot',
+                                               mock.ANY)
+        calculation_method = migration_mock.call_args[0][3]
+        # Confirm we always set the field to True regardless of what we pass
+        self.assertTrue(calculation_method(None))
+
+    # TODO: (Y Release) remove method and this comment
+    @mock.patch.object(sqlalchemy_api, 'use_quota_online_data_migration')
+    def test_volume_use_quota_online_data_migration(self, migration_mock):
+        sqlalchemy_api.volume_use_quota_online_data_migration(
+            self.ctxt, mock.sentinel.max_count)
+        migration_mock.assert_called_once_with(self.ctxt,
+                                               mock.sentinel.max_count,
+                                               'Volume',
+                                               mock.ANY)
+        calculation_method = migration_mock.call_args[0][3]
+
+        # Confirm we set use_quota field to False for temporary volumes
+        temp_volume = mock.Mock(admin_metadata={'temporary': True})
+        self.assertFalse(calculation_method(temp_volume))
+
+        # Confirm we set use_quota field to False for migration destinations
+        migration_dest_volume = mock.Mock(migration_status='target:123')
+        self.assertFalse(calculation_method(migration_dest_volume))
+
+        # Confirm we set use_quota field to True in other cases
+        volume = mock.Mock(admin_metadata={'temporary': False},
+                           migration_status='success')
+        self.assertTrue(calculation_method(volume))
+
+    # TODO: (Y Release) remove method and this comment
+    @mock.patch.object(sqlalchemy_api, 'models')
+    @mock.patch.object(sqlalchemy_api, 'model_query')
+    @mock.patch.object(sqlalchemy_api, 'get_session')
+    def test_use_quota_online_data_migration(self, session_mock, query_mock,
+                                             models_mock):
+        calculate_method = mock.Mock()
+        resource1 = mock.Mock()
+        resource2 = mock.Mock()
+        query = query_mock.return_value.filter_by.return_value
+        query_all = query.limit.return_value.with_for_update.return_value.all
+        query_all.return_value = [resource1, resource2]
+
+        result = sqlalchemy_api.use_quota_online_data_migration(
+            self.ctxt, mock.sentinel.max_count, 'resource_name',
+            calculate_method)
+
+        session_mock.assert_called_once_with()
+        session = session_mock.return_value
+        session.begin.assert_called_once_with()
+        session.begin.return_value.__enter__.assert_called_once_with()
+        session.begin.return_value.__exit__.assert_called_once_with(
+            None, None, None)
+
+        query_mock.assert_called_once_with(self.ctxt,
+                                           models_mock.resource_name,
+                                           session=session)
+        query_mock.return_value.filter_by.assert_called_once_with(
+            use_quota=None)
+        query.count.assert_called_once_with()
+        query.limit.assert_called_once_with(mock.sentinel.max_count)
+        query.limit.return_value.with_for_update.assert_called_once_with()
+        query_all.assert_called_once_with()
+
+        calculate_method.assert_has_calls((mock.call(resource1),
+                                           mock.call(resource2)))
+        self.assertEqual(calculate_method.return_value, resource1.use_quota)
+        self.assertEqual(calculate_method.return_value, resource2.use_quota)
+        self.assertEqual((query.count.return_value, 2), result)
@@ -93,10 +93,10 @@ def create_volume(ctxt,
     if id:
         with mock.patch('cinder.objects.Volume.obj_attr_is_set',
                         obj_attr_is_set(objects.Volume)):
-            volume = objects.Volume(ctxt, id=id, **vol)
+            volume = objects.Volume(context=ctxt, id=id, **vol)
             volume.create()
     else:
-        volume = objects.Volume(ctxt, **vol)
+        volume = objects.Volume(context=ctxt, **vol)
        volume.create()

     # If we get a TestCase instance we add cleanup
@@ -1327,7 +1327,9 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
                                                 event_suffix,
                                                 host=volume.host)

-    @ddt.data(False, True)
+    # Test possible combinations to confirm volumes from W, X, Y releases work
+    @ddt.data((False, True), (True, None), (True, False))
+    @ddt.unpack
     @mock.patch('taskflow.engines.load')
     @mock.patch.object(create_volume_manager, 'CreateVolumeOnFinishTask')
     @mock.patch.object(create_volume_manager, 'CreateVolumeFromSpecTask')
@@ -1336,9 +1338,9 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
     @mock.patch.object(create_volume_manager, 'OnFailureRescheduleTask')
     @mock.patch.object(create_volume_manager, 'ExtractVolumeRefTask')
     @mock.patch.object(create_volume_manager.linear_flow, 'Flow')
-    def test_get_flow(self, is_migration_target, flow_mock, extract_ref_mock,
-                      onfailure_mock, extract_spec_mock, notify_mock,
-                      create_mock, onfinish_mock, load_mock):
+    def test_get_flow(self, is_migration_target, use_quota, flow_mock,
+                      extract_ref_mock, onfailure_mock, extract_spec_mock,
+                      notify_mock, create_mock, onfinish_mock, load_mock):
         assert(isinstance(is_migration_target, bool))
         filter_properties = {'retry': mock.sentinel.retry}
         tasks = [mock.call(extract_ref_mock.return_value),
@@ -1348,8 +1350,9 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
                  mock.call(create_mock.return_value,
                            onfinish_mock.return_value)]

-        volume = mock.Mock()
-        volume.is_migration_target.return_value = is_migration_target
+        volume = mock.Mock(
+            **{'is_migration_target.return_value': is_migration_target,
+               'use_quota': use_quota})

         result = create_volume_manager.get_flow(
             mock.sentinel.context,
@@ -1365,8 +1368,9 @@ class CreateVolumeFlowManagerTestCase(test.TestCase):
             filter_properties,
             mock.sentinel.image_volume_cache)

-        volume.is_migration_target.assert_called_once_with()
-        if is_migration_target:
+        if not volume.use_quota:
+            volume.is_migration_target.assert_called_once_with()
+        if is_migration_target or not use_quota:
             tasks.pop(3)
             notify_mock.assert_not_called()
             end_notify_suffix = None
@@ -1858,7 +1862,9 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):
             self.mock_image_service)

         self.assertTrue(mock_cleanup_cg.called)
-        mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 1})
+        # Online migration of the use_quota field
+        mock_volume_update.assert_any_call(self.ctxt, volume.id,
+                                           {'size': 1, 'use_quota': True})
         self.assertEqual(volume_size, volume.size)

     @mock.patch('cinder.image.image_utils.check_available_space')
@@ -1995,7 +2001,9 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):
         )

         # The volume size should be reduced to virtual_size and then put back
-        mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 2})
+        # Online migration of the use_quota field
+        mock_volume_update.assert_any_call(self.ctxt, volume.id,
+                                           {'size': 2, 'use_quota': True})
         mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 10})

         # Make sure created a new cache entry
@@ -2073,7 +2081,9 @@ class CreateVolumeFlowManagerImageCacheTestCase(test.TestCase):
         # The volume size should be reduced to virtual_size and then put back,
         # especially if there is an exception while creating the volume.
         self.assertEqual(2, mock_volume_update.call_count)
-        mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 2})
+        # Online migration of the use_quota field
+        mock_volume_update.assert_any_call(self.ctxt, volume.id,
+                                           {'size': 2, 'use_quota': True})
         mock_volume_update.assert_any_call(self.ctxt, volume.id, {'size': 10})

         # Make sure we didn't try and create a cache entry
@@ -270,6 +270,38 @@ class GenericVolumeDriverTestCase(BaseDriverTestCase):
                          temp_vol.attach_status)
         self.assertEqual('fakezone', temp_vol.availability_zone)
         self.assertEqual('fakecluster', temp_vol.cluster_name)
+        self.assertFalse(temp_vol.use_quota)
+
+    def test__create_temp_snapshot(self):
+        volume_dict = {'id': fake.SNAPSHOT_ID,
+                       'host': 'fakehost',
+                       'cluster_name': 'fakecluster',
+                       'availability_zone': 'fakezone',
+                       'size': 1,
+                       'volume_type_id': fake.VOLUME_TYPE_ID}
+        volume = fake_volume.fake_volume_obj(self.context, **volume_dict)
+
+        # We want to confirm that the driver properly updates fields with the
+        # value returned by the create_snapshot method
+        driver_updates = {'provider_location': 'driver_provider_location'}
+
+        with mock.patch.object(self.volume.driver, 'create_snapshot',
+                               return_value=driver_updates) as create_mock:
+            res = self.volume.driver._create_temp_snapshot(self.context,
+                                                           volume)
+            create_mock.assert_called_once_with(res)
+
+        expected = {'volume_id': volume.id,
+                    'progress': '100%',
+                    'status': fields.SnapshotStatus.AVAILABLE,
+                    'use_quota': False,  # Temporary snapshots don't use quota
+                    'project_id': self.context.project_id,
+                    'user_id': self.context.user_id,
+                    'volume_size': volume.size,
+                    'volume_type_id': volume.volume_type_id,
+                    'provider_location': 'driver_provider_location'}
+        for key, value in expected.items():
+            self.assertEqual(value, res[key])

     @mock.patch.object(volume_utils, 'brick_get_connector_properties')
     @mock.patch.object(cinder.volume.manager.VolumeManager, '_attach_volume')
@@ -481,14 +481,15 @@ class ImageVolumeTestCases(base.BaseVolumeTestCase):
     @mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"])
     def test_clone_image_volume(self, mock_reserve, mock_commit,
                                 mock_rollback, mock_cloned_volume):
-        vol = tests_utils.create_volume(self.context,
+        # Confirm cloning does not copy the quota use field
+        vol = tests_utils.create_volume(self.context, use_quota=False,
                                         **self.volume_params)
         # unnecessary attributes should be removed from image volume
         vol.consistencygroup = None
         result = self.volume._clone_image_volume(self.context, vol,
                                                  {'id': fake.VOLUME_ID})

         self.assertNotEqual(False, result)
+        self.assertTrue(result.use_quota)  # Original was False
         mock_reserve.assert_called_once_with(self.context, volumes=1,
                                              volumes_vol_type_name=1,
                                              gigabytes=vol.size,
@@ -349,13 +349,14 @@ class VolumeTestCase(base.BaseVolumeTestCase):
         self.assertEqual("error_deleting", volume.status)
         volume.destroy()

+    @ddt.data(True, False)
     @mock.patch('cinder.utils.clean_volume_file_locks')
     @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify')
     @mock.patch('cinder.quota.QUOTAS.rollback', new=mock.Mock())
-    @mock.patch('cinder.quota.QUOTAS.commit', new=mock.Mock())
+    @mock.patch('cinder.quota.QUOTAS.commit')
     @mock.patch('cinder.quota.QUOTAS.reserve', return_value=['RESERVATION'])
-    def test_create_delete_volume(self, _mock_reserve, mock_notify,
-                                  mock_clean):
+    def test_create_delete_volume(self, use_quota, _mock_reserve, commit_mock,
+                                  mock_notify, mock_clean):
         """Test volume can be created and deleted."""
         volume = tests_utils.create_volume(
             self.context,
@@ -374,19 +375,33 @@ class VolumeTestCase(base.BaseVolumeTestCase):
         self.assertEqual({'_pool0': {'allocated_capacity_gb': 1}},
                          self.volume.stats['pools'])

+        # Confirm delete_volume handles use_quota field
+        volume.use_quota = use_quota
+        volume.save()  # Need to save to DB because of the refresh call
+        commit_mock.reset_mock()
+        _mock_reserve.reset_mock()
+        mock_notify.reset_mock()
         self.volume.delete_volume(self.context, volume)
         vol = db.volume_get(context.get_admin_context(read_deleted='yes'),
                             volume_id)
         self.assertEqual(vol['status'], 'deleted')

-        self.assert_notify_called(mock_notify,
-                                  (['INFO', 'volume.create.start'],
-                                   ['INFO', 'volume.create.end'],
-                                   ['INFO', 'volume.delete.start'],
-                                   ['INFO', 'volume.delete.end']),
-                                  any_order=True)
-        self.assertEqual({'_pool0': {'allocated_capacity_gb': 0}},
-                         self.volume.stats['pools'])
+        if use_quota:
+            expected_capacity = 0
+            self.assert_notify_called(mock_notify,
+                                      (['INFO', 'volume.delete.start'],
+                                       ['INFO', 'volume.delete.end']),
+                                      any_order=True)
+            self.assertEqual(1, _mock_reserve.call_count)
+            self.assertEqual(1, commit_mock.call_count)
+        else:
+            expected_capacity = 1
+            mock_notify.assert_not_called()
+            _mock_reserve.assert_not_called()
+            commit_mock.assert_not_called()
+        self.assertEqual(
+            {'_pool0': {'allocated_capacity_gb': expected_capacity}},
+            self.volume.stats['pools'])

         self.assertRaises(exception.NotFound,
                           db.volume_get,
@@ -2337,7 +2352,7 @@ class VolumeTestCase(base.BaseVolumeTestCase):

         if use_temp_snapshot and has_snapshot:
             _delete_snapshot.assert_called_once_with(
-                self.context, {'id': 'fake_snapshot'}, handle_quota=False)
+                self.context, {'id': 'fake_snapshot'})
         else:
             _delete_snapshot.assert_not_called()
@@ -2391,6 +2406,47 @@ class VolumeTestCase(base.BaseVolumeTestCase):
                           fake_volume,
                           fake_snapshot)

+    @ddt.data(True, False)
+    @mock.patch('cinder.quota.QUOTAS.commit')
+    @mock.patch('cinder.quota.QUOTAS.reserve')
+    @mock.patch.object(vol_manager.VolumeManager,
+                       '_notify_about_snapshot_usage')
+    @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'delete_snapshot')
+    def test_delete_snapshot(self, use_quota, delete_mock, notify_mock,
+                             reserve_mock, commit_mock):
+        """Test delete snapshot."""
+        volume = tests_utils.create_volume(self.context, CONF.host)
+
+        snapshot = create_snapshot(volume.id, size=volume.size,
+                                   ctxt=self.context,
+                                   use_quota=use_quota,
+                                   status=fields.SnapshotStatus.AVAILABLE)
+
+        self.volume.delete_snapshot(self.context, snapshot)
+
+        delete_mock.assert_called_once_with(snapshot)
+        self.assertEqual(2, notify_mock.call_count)
+        notify_mock.assert_has_calls((
+            mock.call(mock.ANY, snapshot, 'delete.start'),
+            mock.call(mock.ANY, snapshot, 'delete.end'),
+        ))
+
+        if use_quota:
+            reserve_mock.assert_called_once_with(
+                mock.ANY, project_id=snapshot.project_id,
+                gigabytes=-snapshot.volume_size,
+                gigabytes_vol_type_name=-snapshot.volume_size,
+                snapshots=-1, snapshots_vol_type_name=-1)
+            commit_mock.assert_called_once_with(mock.ANY,
+                                                reserve_mock.return_value,
+                                                project_id=snapshot.project_id)
+        else:
+            reserve_mock.assert_not_called()
+            commit_mock.assert_not_called()
+
+        self.assertEqual(fields.SnapshotStatus.DELETED, snapshot.status)
+        self.assertTrue(snapshot.deleted)
+
     def test_cannot_delete_volume_with_snapshots(self):
         """Test volume can't be deleted with dependent snapshots."""
         volume = tests_utils.create_volume(self.context, **self.volume_params)
@@ -93,7 +93,7 @@ class VolumeMigrationTestCase(base.BaseVolumeTestCase):
             lambda x, y, z, new_type_id=None: (
                 True, {'user_id': fake.USER_ID}))

-        volume = tests_utils.create_volume(self.context, size=0,
+        volume = tests_utils.create_volume(ctxt=self.context, size=0,
                                            host=CONF.host,
                                            migration_status='migrating')
         host_obj = {'host': 'newhost', 'capabilities': {}}
@@ -208,6 +208,9 @@ class VolumeMigrationTestCase(base.BaseVolumeTestCase):
     def test_migrate_volume_generic(self, volume_get,
                                     migrate_volume_completion,
                                     nova_api):
+        def Volume(original=objects.Volume, **kwargs):
+            return original(**kwargs)
+
         fake_db_new_volume = {'status': 'available', 'id': fake.VOLUME_ID}
         fake_new_volume = fake_volume.fake_db_volume(**fake_db_new_volume)
         new_volume_obj = fake_volume.fake_volume_obj(self.context,
@@ -217,10 +220,15 @@ class VolumeMigrationTestCase(base.BaseVolumeTestCase):
         update_server_volume = nova_api.return_value.update_server_volume
         volume = tests_utils.create_volume(self.context, size=1,
                                            host=CONF.host)
+
+        volume_mock = self.mock_object(objects, 'Volume', side_effect=Volume)
         with mock.patch.object(self.volume, '_copy_volume_data') as \
                 mock_copy_volume:
             self.volume._migrate_volume_generic(self.context, volume,
                                                 host_obj, None)
+
+            # Temporary created volume must not use quota
+            self.assertFalse(volume_mock.call_args[1]['use_quota'])
             mock_copy_volume.assert_called_with(self.context, volume,
                                                 new_volume_obj,
                                                 remote='dest')
@@ -402,9 +402,9 @@ class API(base.Base):
         # Note(zhiteng): update volume quota reservation
         try:
             reservations = None
-            if volume.status != 'error_managing':
-                LOG.debug("Decrease volume quotas only if status is not "
-                          "error_managing.")
+            if volume.status != 'error_managing' and volume.use_quota:
+                LOG.debug("Decrease volume quotas for non temporary volume"
+                          " in non error_managing status.")
                 reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
                 QUOTAS.add_volume_type_opts(context,
                                             reserve_opts,
@@ -1305,6 +1305,7 @@ class BaseVD(object, metaclass=abc.ABCMeta):
             'display_description': None,
             'volume_type_id': volume['volume_type_id'],
             'encryption_key_id': volume['encryption_key_id'],
+            'use_quota': False,  # Don't count for quota
             'metadata': {},
         }
         temp_snap_ref = objects.Snapshot(context=context, **kwargs)
@@ -1337,6 +1338,8 @@ class BaseVD(object, metaclass=abc.ABCMeta):
             'attach_status': fields.VolumeAttachStatus.DETACHED,
             'availability_zone': volume.availability_zone,
             'volume_type_id': volume.volume_type_id,
+            'use_quota': False,  # Don't count for quota
+            # TODO: (Y release) Remove admin_metadata and only use use_quota
             'admin_metadata': {'temporary': 'True'},
         }
         kwargs.update(volume_options or {})
@@ -1352,7 +1352,8 @@ def get_flow(context, manager, db, driver, scheduler_rpcapi, host, volume,
     volume_flow.add(ExtractVolumeSpecTask(db))
     # Temporary volumes created during migration should not be notified
     end_notify_suffix = None
-    if not volume.is_migration_target():
+    # TODO: (Y release) replace check with: if volume.use_quota:
+    if volume.use_quota or not volume.is_migration_target():
         volume_flow.add(NotifyVolumeActionTask(db, 'create.start'))
         end_notify_suffix = 'create.end'
     volume_flow.add(CreateVolumeFromSpecTask(manager,
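During the transition this guard has to handle three generations of volumes, because pre-upgrade rows arrive with use_quota = None. Its truth table written out as a sketch (illustrative, not commit code):

# Truth table of "volume.use_quota or not volume.is_migration_target()".
# None models a legacy volume whose use_quota was never backfilled.
def should_notify(use_quota, is_migration_target):
    return bool(use_quota or not is_migration_target)

assert should_notify(True, True) is True    # new field is authoritative
assert should_notify(None, True) is False   # legacy: fall back to the check
assert should_notify(None, False) is True
assert should_notify(False, False) is True  # X temp volumes keep W behavior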
@@ -235,7 +235,8 @@ class VolumeManager(manager.CleanableManager,
     _VOLUME_CLONE_SKIP_PROPERTIES = {
         'id', '_name_id', 'name_id', 'name', 'status',
         'attach_status', 'migration_status', 'volume_type',
-        'consistencygroup', 'volume_attachment', 'group', 'snapshots'}
+        'consistencygroup', 'volume_attachment', 'group', 'snapshots',
+        'use_quota'}

     def _get_service(self,
                      host: Optional[str] = None,
@@ -928,23 +929,22 @@ class VolumeManager(manager.CleanableManager,
                     reason=_("Unmanage and cascade delete options "
                              "are mutually exclusive."))

-        # To backup a snapshot or a 'in-use' volume, create a temp volume
-        # from the snapshot or in-use volume, and back it up.
-        # Get admin_metadata (needs admin context) to detect temporary volume.
-        is_temp_vol = False
-        with volume.obj_as_admin():
-            if volume.admin_metadata.get('temporary', 'False') == 'True':
-                is_temp_vol = True
-                LOG.info("Trying to delete temp volume: %s", volume.id)
-
+        # We have temporary volumes that did not modify the quota on creation
+        # and should not modify it when deleted. These temporary volumes are
+        # created for volume migration between backends and for backups (from
+        # in-use volume or snapshot).
+        # TODO: (Y release) replace until the if do_quota (including comments)
+        # with: do_quota = volume.use_quota
         # The status 'deleting' is not included, because it only applies to
         # the source volume to be deleted after a migration. No quota
         # needs to be handled for it.
         is_migrating = volume.migration_status not in (None, 'error',
                                                        'success')
-        # If deleting source/destination volume in a migration or a temp
-        # volume for backup, we should skip quotas.
-        do_quota = not (is_migrating or is_temp_vol)
+        # Get admin_metadata (needs admin context) to detect temporary volume.
+        with volume.obj_as_admin():
+            do_quota = not (volume.use_quota is False or is_migrating or
+                            volume.admin_metadata.get('temporary') == 'True')

         if do_quota:
             notification = 'unmanage.' if unmanage_only else 'delete.'
             self._notify_about_volume_usage(context, volume,
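Condensed, the deletion path now skips quota work for any volume that never consumed quota, whichever generation flagged it. A compact restatement of the do_quota decision (illustration only, not commit code):

# Equivalent do_quota decision (illustrative restatement).
def should_handle_quota(use_quota, migration_status, admin_metadata):
    is_migrating = migration_status not in (None, 'error', 'success')
    legacy_temp = admin_metadata.get('temporary') == 'True'
    return not (use_quota is False or is_migrating or legacy_temp)

assert should_handle_quota(True, None, {}) is True
assert should_handle_quota(False, None, {}) is False        # X-style temp
assert should_handle_quota(None, 'migrating', {}) is False  # mid-migration
assert should_handle_quota(None, None, {'temporary': 'True'}) is False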
@@ -992,6 +992,8 @@ class VolumeManager(manager.CleanableManager,

             self._clear_db(volume, new_status)

+        # If deleting source/destination volume in a migration or a temp
+        # volume for backup, we should skip quotas.
         if do_quota:
             # Get reservations
             try:
@@ -1103,6 +1105,7 @@ class VolumeManager(manager.CleanableManager,
                                    'creating new volume with this snapshot.',
             'volume_type_id': volume.volume_type_id,
             'encryption_key_id': volume.encryption_key_id,
+            'use_quota': False,  # Don't use quota for temporary snapshot
             'metadata': {}
         }
         snapshot = objects.Snapshot(context=context, **kwargs)
@@ -1188,8 +1191,7 @@ class VolumeManager(manager.CleanableManager,
                        "please manually reset it.") % msg_args
                 raise exception.BadResetResourceStatus(reason=msg)
             if backup_snapshot:
-                self.delete_snapshot(context,
-                                     backup_snapshot, handle_quota=False)
+                self.delete_snapshot(context, backup_snapshot)
         msg = ('Volume %(v_id)s reverted to snapshot %(snap_id)s '
                'successfully.')
         msg_args = {'v_id': volume.id, 'snap_id': snapshot.id}
@@ -1275,8 +1277,7 @@ class VolumeManager(manager.CleanableManager,
     def delete_snapshot(self,
                         context: context.RequestContext,
                         snapshot: objects.Snapshot,
-                        unmanage_only: bool = False,
-                        handle_quota: bool = True) -> Optional[bool]:
+                        unmanage_only: bool = False) -> Optional[bool]:
         """Deletes and unexports snapshot."""
         context = context.elevated()
         snapshot._context = context
@@ -1324,7 +1325,7 @@ class VolumeManager(manager.CleanableManager,
         # Get reservations
         reservations = None
         try:
-            if handle_quota:
+            if snapshot.use_quota:
                 if CONF.no_snapshot_gb_quota:
                     reserve_opts = {'snapshots': -1}
                 else:
@@ -2320,6 +2321,7 @@ class VolumeManager(manager.CleanableManager,
             status='creating',
             attach_status=fields.VolumeAttachStatus.DETACHED,
             migration_status='target:%s' % volume['id'],
+            use_quota=False,  # Don't use quota for temporary volume
             **new_vol_values
         )
         new_volume.create()
@@ -0,0 +1,14 @@
+---
+fixes:
+  - |
+    `Bug #1923828 <https://bugs.launchpad.net/cinder/+bug/1923828>`_: Fixed
+    quota usage sync counting temporary snapshots from backups and revert to
+    snapshot.
+  - |
+    `Bug #1923829 <https://bugs.launchpad.net/cinder/+bug/1923829>`_: Fixed
+    incorrect quota usage caused by manually deleting temporary snapshots
+    left over from failed backups or reverts to snapshot.
+  - |
+    `Bug #1923830 <https://bugs.launchpad.net/cinder/+bug/1923830>`_: Fixed
+    incorrect quota usage when successfully backing up an in-use volume
+    using a temporary snapshot instead of a clone.