Fix replica quota allocation during share migration
Fixed an issue in share migration where replica quotas were not being accounted for. Manila now tries to allocate share replica quotas for the migrated share if the destination share type has a `replication_type` extra spec. If the migration fails or is cancelled, the quotas are reverted.

Change-Id: Ie0a08a61c79dc8ea2a7a240363e94b26a80e64a4
Closes-Bug: 1910752
commit 6a6aa0f146
parent 7e392f3acb
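At a high level, the change follows the usual reserve-then-commit quota pattern: when a migration targets a share type whose extra specs include `replication_type`, replica quotas are reserved and committed alongside the regular share and gigabyte quotas, and the same deltas are later applied with a negative sign when the migration fails, is cancelled, or completes and one instance goes away. The self-contained sketch below illustrates that flow with a toy in-memory quota tracker; the names (`ToyQuotas`, `migrate`, `revert`) and the simplified semantics are illustrative assumptions, not Manila's actual quota engine.

```python
# Illustrative sketch only: a toy quota tracker mimicking the
# reserve/commit flow used around share migration. Names and behavior
# are simplified assumptions, not Manila's real quota code.


class OverQuota(Exception):
    pass


class ToyQuotas:
    def __init__(self, limits):
        self.limits = limits                      # e.g. {'share_replicas': 10}
        self.in_use = {k: 0 for k in limits}

    def reserve(self, **deltas):
        # Reject the whole reservation if any positive delta would exceed
        # its limit; negative deltas always pass (they free usage).
        for name, delta in deltas.items():
            if delta > 0 and self.in_use[name] + delta > self.limits[name]:
                raise OverQuota(name)
        return deltas                             # the "reservation" handle

    def commit(self, reservation):
        for name, delta in reservation.items():
            self.in_use[name] += delta


def migrate(quotas, size, dest_has_replication):
    """Allocate quotas for the destination instance of a migration."""
    deltas = {'shares': 1, 'gigabytes': size}
    if dest_has_replication:
        # Destination share type supports replication, so replica quotas
        # must be accounted for as well (the bug being fixed here).
        deltas.update(share_replicas=1, replica_gigabytes=size)
    reservation = quotas.reserve(**deltas)
    quotas.commit(reservation)
    return deltas


def revert(quotas, deltas):
    """Undo the allocation when the migration fails or is cancelled."""
    reservation = quotas.reserve(**{k: -v for k, v in deltas.items()})
    quotas.commit(reservation)


if __name__ == '__main__':
    q = ToyQuotas({'shares': 10, 'gigabytes': 100,
                   'share_replicas': 10, 'replica_gigabytes': 100})
    deltas = migrate(q, size=5, dest_has_replication=True)
    revert(q, deltas)                             # e.g. migration cancelled
    assert all(v == 0 for v in q.in_use.values())
```

The point of keeping both the allocation and the revert as symmetric deltas is that source and destination instances co-exist during the migration, so usage for both must be counted until exactly one of them is removed. The diff below implements this in Manila itself.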
@@ -39,8 +39,10 @@ from manila.message import message_field
 from manila import quota
 from manila import rpc
 from manila.share import rpcapi as share_rpcapi
+from manila.share import share_types
 
 LOG = log.getLogger(__name__)
+QUOTAS = quota.QUOTAS
 
 scheduler_driver_opt = cfg.StrOpt('scheduler_driver',
                                   default='manila.scheduler.drivers.'
@@ -179,6 +181,8 @@ class SchedulerManager(manager.Manager):
                 'migrate_share_to_host',
                 {'task_state': constants.TASK_STATE_MIGRATION_ERROR},
                 context, ex, request_spec)
+            share_types.revert_allocated_share_type_quotas_during_migration(
+                context, share_ref, new_share_type_id)
 
         try:
             tgt_host = self.driver.host_passes_filters(
@@ -1395,6 +1395,106 @@ class API(base.Base):
 
         return snapshot
 
+    def _modify_quotas_for_share_migration(self, context, share,
+                                           new_share_type):
+        """Consume quotas for share migration.
+
+        If a share migration was requested and a new share type was provided,
+        quotas must be consumed from this share type. If no quotas are
+        available for shares, gigabytes, share replicas or replica gigabytes,
+        an error will be raised.
+        """
+        new_share_type_id = new_share_type['id']
+
+        if new_share_type_id == share['share_type_id']:
+            return
+
+        new_type_extra_specs = self.get_share_attributes_from_share_type(
+            new_share_type)
+        new_type_replication_type = new_type_extra_specs.get(
+            'replication_type', None)
+
+        deltas = {}
+
+        # NOTE(carloss): If a new share type with a replication type was
+        # specified, there is a need to allocate quotas in the new share
+        # type. We won't remove the currently consumed quotas, since both
+        # share instances will co-exist until the migration is completed,
+        # cancelled, or fails.
+        if new_type_replication_type:
+            deltas['share_replicas'] = 1
+            deltas['replica_gigabytes'] = share['size']
+
+        deltas.update({
+            'share_type_id': new_share_type_id,
+            'shares': 1,
+            'gigabytes': share['size']
+        })
+
+        try:
+            reservations = QUOTAS.reserve(
+                context, project_id=share['project_id'],
+                user_id=share['user_id'], **deltas)
+            QUOTAS.commit(
+                context, reservations, project_id=share['project_id'],
+                user_id=share['user_id'], share_type_id=new_share_type_id)
+        except exception.OverQuota as e:
+            overs = e.kwargs['overs']
+            usages = e.kwargs['usages']
+            quotas = e.kwargs['quotas']
+
+            def _consumed(name):
+                return (usages[name]['reserved'] + usages[name]['in_use'])
+
+            if 'replica_gigabytes' in overs:
+                LOG.warning("Replica gigabytes quota exceeded "
+                            "for %(s_pid)s, tried to migrate "
+                            "%(s_size)sG share (%(d_consumed)dG of "
+                            "%(d_quota)dG already consumed).", {
+                                's_pid': context.project_id,
+                                's_size': share['size'],
+                                'd_consumed': _consumed(
+                                    'replica_gigabytes'),
+                                'd_quota': quotas['replica_gigabytes']})
+                msg = _("Failed while migrating a share with replication "
+                        "support. Maximum number of allowed "
+                        "replica gigabytes is exceeded.")
+                raise exception.ShareReplicaSizeExceedsAvailableQuota(
+                    message=msg)
+
+            if 'share_replicas' in overs:
+                LOG.warning("Quota exceeded for %(s_pid)s, "
+                            "unable to migrate share-replica (%(d_consumed)d "
+                            "of %(d_quota)d already consumed).", {
+                                's_pid': context.project_id,
+                                'd_consumed': _consumed('share_replicas'),
+                                'd_quota': quotas['share_replicas']})
+                msg = _(
+                    "Failed while migrating a share with replication "
+                    "support. Maximum number of allowed share-replicas "
+                    "is exceeded.")
+                raise exception.ShareReplicasLimitExceeded(msg)
+
+            if 'gigabytes' in overs:
+                LOG.warning("Quota exceeded for %(s_pid)s, "
+                            "tried to migrate "
+                            "%(s_size)sG share (%(d_consumed)dG of "
+                            "%(d_quota)dG already consumed).", {
+                                's_pid': context.project_id,
+                                's_size': share['size'],
+                                'd_consumed': _consumed('gigabytes'),
+                                'd_quota': quotas['gigabytes']})
+                raise exception.ShareSizeExceedsAvailableQuota()
+
+            if 'shares' in overs:
+                LOG.warning("Quota exceeded for %(s_pid)s, "
+                            "tried to migrate "
+                            "share (%(d_consumed)d shares "
+                            "already consumed).", {
+                                's_pid': context.project_id,
+                                'd_consumed': _consumed('shares')})
+                raise exception.ShareLimitExceeded(allowed=quotas['shares'])
+
     def migration_start(
             self, context, share, dest_host, force_host_assisted_migration,
             preserve_metadata, writable, nondisruptive, preserve_snapshots,
@@ -1479,6 +1579,8 @@ class API(base.Base):
                        " given share %s has extra_spec "
                        "'driver_handles_share_servers' as True.") % share['id']
                raise exception.InvalidInput(reason=msg)
+            self._modify_quotas_for_share_migration(context, share,
+                                                    new_share_type)
         else:
             share_type = {}
             share_type_id = share_instance['share_type_id']
@@ -1194,6 +1194,9 @@ class ShareManager(manager.SchedulerDependentManager):
             # NOTE(ganso): Cleaning up error'ed destination share instance from
             # database. It is assumed that driver cleans up leftovers in
             # backend when migration fails.
+            share_types.revert_allocated_share_type_quotas_during_migration(
+                context, src_share_instance,
+                src_share_instance['share_type_id'])
             self._migration_delete_instance(context, dest_share_instance['id'])
             self._restore_migrating_snapshots_status(
                 context, src_share_instance['id'])
@@ -1295,7 +1298,8 @@ class ShareManager(manager.SchedulerDependentManager):
     def migration_driver_continue(self, context):
         """Invokes driver to continue migration of shares."""
 
-        instances = self.db.share_instances_get_all_by_host(context, self.host)
+        instances = self.db.share_instances_get_all_by_host(
+            context, self.host, with_share_data=True)
 
         for instance in instances:
 
@@ -1354,6 +1358,10 @@ class ShareManager(manager.SchedulerDependentManager):
 
             except Exception:
 
+                (share_types.
+                 revert_allocated_share_type_quotas_during_migration(
+                     context, src_share_instance,
+                     dest_share_instance['share_type_id']))
                 # NOTE(ganso): Cleaning up error'ed destination share
                 # instance from database. It is assumed that driver cleans
                 # up leftovers in backend when migration fails.
@@ -1617,6 +1625,10 @@ class ShareManager(manager.SchedulerDependentManager):
             src_share_instance['id'],
             dest_share_instance['id'])
 
+        share_types.revert_allocated_share_type_quotas_during_migration(
+            context, dest_share_instance, src_share_instance['share_type_id'],
+            allow_deallocate_from_current_type=True)
+
         self._migration_delete_instance(context, src_share_instance['id'])
 
     def _migration_complete_instance(self, context, share_ref,
@@ -1702,6 +1714,9 @@ class ShareManager(manager.SchedulerDependentManager):
             self.db.share_update(
                 context, share_ref['id'],
                 {'task_state': constants.TASK_STATE_MIGRATION_ERROR})
+            # NOTE(carloss): No need to deallocate quotas allocated during
+            # the migration request, since both share instances still exist
+            # even if they are set to an error state.
             raise exception.ShareMigrationFailed(reason=msg)
         else:
             try:
@@ -1712,6 +1727,9 @@ class ShareManager(manager.SchedulerDependentManager):
                 msg = _("Host-assisted migration completion failed for"
                         " share %s.") % share_ref['id']
                 LOG.exception(msg)
+                # NOTE(carloss): No need to deallocate quotas allocated during
+                # the migration request, since both source and destination
+                # instances will still exist
                 self.db.share_update(
                     context, share_ref['id'],
                     {'task_state': constants.TASK_STATE_MIGRATION_ERROR})
@@ -1807,6 +1825,12 @@ class ShareManager(manager.SchedulerDependentManager):
             src_share_instance['id'],
             dest_share_instance['id'])
 
+        # NOTE(carloss): Won't revert allocated quotas for the share type here
+        # because the delete_instance_and_wait method will end up calling the
+        # delete_share_instance method here in the share manager. When the
+        # share instance deletion is requested in the share manager, Manila
+        # itself will take care of deallocating the existing quotas for the
+        # share instance
         helper.delete_instance_and_wait(src_share_instance)
 
     @utils.require_driver_initialized
@@ -1871,6 +1895,9 @@ class ShareManager(manager.SchedulerDependentManager):
             context, share_ref['id'],
             {'task_state': constants.TASK_STATE_MIGRATION_CANCELLED})
 
+        share_types.revert_allocated_share_type_quotas_during_migration(
+            context, src_share_instance, dest_share_instance['share_type_id'])
+
         LOG.info("Share Migration for share %s"
                  " was cancelled.", share_ref['id'])
 
@@ -30,9 +30,11 @@ from manila import context
 from manila import db
 from manila import exception
 from manila.i18n import _
+from manila import quota
 
 CONF = cfg.CONF
 LOG = log.getLogger(__name__)
+QUOTAS = quota.QUOTAS
 
 MIN_SIZE_KEY = "provisioning:min_share_size"
 MAX_SIZE_KEY = "provisioning:max_share_size"
@@ -460,3 +462,48 @@ def provision_filter_on_size(context, share_type, size):
               ) % {'req_size': size_int, 'max_size': max_size,
                    'sha_type': share_type['name']}
         raise exception.InvalidInput(reason=msg)
+
+
+def revert_allocated_share_type_quotas_during_migration(
+        context, share, share_type_id,
+        allow_deallocate_from_current_type=False):
+
+    # If the given share type ID matches the share's current share type,
+    # there is no need to revert quotas, because no new quotas were
+    # allocated (no share type change was identified), unless this is a
+    # migration that was successfully completed.
+    if ((share_type_id == share['share_type_id'])
+            and not allow_deallocate_from_current_type):
+        return
+
+    new_share_type = get_share_type(context, share_type_id)
+    new_type_extra_specs = new_share_type.get('extra_specs', None)
+
+    new_type_replication_type = None
+    if new_type_extra_specs:
+        new_type_replication_type = new_type_extra_specs.get(
+            'replication_type', None)
+
+    deltas = {}
+
+    if new_type_replication_type:
+        deltas['share_replicas'] = -1
+        deltas['replica_gigabytes'] = -share['size']
+
+    deltas.update({
+        'share_type_id': new_share_type['id'],
+        'shares': -1,
+        'gigabytes': -share['size']
+    })
+
+    try:
+        reservations = QUOTAS.reserve(
+            context, project_id=share['project_id'],
+            user_id=share['user_id'], **deltas)
+    except Exception:
+        LOG.exception("Failed to update usages for share_replicas and "
+                      "replica_gigabytes.")
+    else:
+        QUOTAS.commit(
+            context, reservations, project_id=share['project_id'],
+            user_id=share['user_id'], share_type_id=share_type_id)
@@ -33,6 +33,7 @@ from manila.scheduler.drivers import base
 from manila.scheduler.drivers import filter
 from manila.scheduler import manager
 from manila.share import rpcapi as share_rpcapi
+from manila.share import share_types
 from manila import test
 from manila.tests import db_utils
 from manila.tests import fake_share as fakes
@@ -281,6 +282,9 @@ class SchedulerManagerTestCase(test.TestCase):
         self.mock_object(base.Scheduler,
                          'host_passes_filters',
                          mock.Mock(return_value=host))
+        self.mock_object(
+            share_types,
+            'revert_allocated_share_type_quotas_during_migration')
 
         self.assertRaises(
             TypeError, self.manager.migrate_share_to_host,
@@ -293,6 +297,8 @@ class SchedulerManagerTestCase(test.TestCase):
         share_rpcapi.ShareAPI.migration_start.assert_called_once_with(
             self.context, share, host.host, False, True, True, False, True,
             'fake_net_id', 'fake_type_id')
+        (share_types.revert_allocated_share_type_quotas_during_migration.
+            assert_called_once_with(self.context, share, 'fake_type_id'))
 
     @ddt.data(exception.NoValidHost(reason='fake'), TypeError)
     def test_migrate_share_to_host_exception(self, exc):
@@ -307,6 +313,9 @@ class SchedulerManagerTestCase(test.TestCase):
                          mock.Mock(side_effect=exc))
         self.mock_object(db, 'share_update')
         self.mock_object(db, 'share_instance_update')
+        self.mock_object(
+            share_types,
+            'revert_allocated_share_type_quotas_during_migration')
 
         capture = (exception.NoValidHost if
                    isinstance(exc, exception.NoValidHost) else TypeError)
@@ -325,6 +334,8 @@ class SchedulerManagerTestCase(test.TestCase):
         db.share_instance_update.assert_called_once_with(
             self.context, share.instance['id'],
             {'status': constants.STATUS_AVAILABLE})
+        (share_types.revert_allocated_share_type_quotas_during_migration.
+            assert_called_once_with(self.context, share, 'fake_type_id'))
 
     def test_manage_share(self):
 
@@ -3222,6 +3222,92 @@ class ShareAPITestCase(test.TestCase):
 
         self.assertEqual(fake_el, out)
 
+    @ddt.data(True, False)
+    def test__modify_quotas_for_share_migration(self, new_replication_type):
+        extra_specs = (
+            {'replication_type': 'readable'} if new_replication_type else {})
+        share = db_utils.create_share()
+        share_type = db_utils.create_share_type(extra_specs=extra_specs)
+
+        expected_deltas = {
+            'project_id': share['project_id'],
+            'user_id': share['user_id'],
+            'shares': 1,
+            'gigabytes': share['size'],
+            'share_type_id': share_type['id']
+        }
+
+        if new_replication_type:
+            expected_deltas.update({
+                'share_replicas': 1,
+                'replica_gigabytes': share['size'],
+            })
+        reservations = 'reservations'
+
+        mock_specs_get = self.mock_object(
+            self.api, 'get_share_attributes_from_share_type',
+            mock.Mock(return_value=extra_specs))
+        mock_reserve = self.mock_object(
+            quota.QUOTAS, 'reserve', mock.Mock(return_value=reservations))
+        mock_commit = self.mock_object(quota.QUOTAS, 'commit')
+
+        self.api._modify_quotas_for_share_migration(
+            self.context, share, share_type)
+
+        mock_specs_get.assert_called_once_with(share_type)
+        mock_reserve.assert_called_once_with(
+            self.context, **expected_deltas)
+        mock_commit.assert_called_once_with(
+            self.context, reservations, project_id=share['project_id'],
+            user_id=share['user_id'], share_type_id=share_type['id'])
+
+    @ddt.data(
+        ('replica_gigabytes', exception.ShareReplicaSizeExceedsAvailableQuota),
+        ('share_replicas', exception.ShareReplicasLimitExceeded),
+        ('gigabytes', exception.ShareSizeExceedsAvailableQuota)
+    )
+    @ddt.unpack
+    def test__modify_quotas_for_share_migration_reservation_failed(
+            self, over_resource, expected_exception):
+        extra_specs = {'replication_type': 'readable'}
+        share = db_utils.create_share()
+        share_type = db_utils.create_share_type(extra_specs=extra_specs)
+        expected_deltas = {
+            'project_id': share['project_id'],
+            'user_id': share['user_id'],
+            'share_replicas': 1,
+            'shares': 1,
+            'gigabytes': share['size'],
+            'replica_gigabytes': share['size'],
+            'share_type_id': share_type['id']
+        }
+        usages = {
+            over_resource: {
+                'reserved': 'fake',
+                'in_use': 'fake'
+            }
+        }
+        quotas = {
+            over_resource: 'fake'
+        }
+
+        effect_exc = exception.OverQuota(
+            overs=[over_resource], usages=usages, quotas=quotas)
+        mock_specs_get = self.mock_object(
+            self.api, 'get_share_attributes_from_share_type',
+            mock.Mock(return_value=extra_specs))
+        mock_reserve = self.mock_object(
+            quota.QUOTAS, 'reserve', mock.Mock(side_effect=effect_exc))
+
+        self.assertRaises(
+            expected_exception,
+            self.api._modify_quotas_for_share_migration,
+            self.context, share, share_type
+        )
+
+        mock_specs_get.assert_called_once_with(share_type)
+        mock_reserve.assert_called_once_with(self.context, **expected_deltas)
+
     @ddt.data({'share_type': True, 'share_net': True, 'dhss': True},
               {'share_type': False, 'share_net': True, 'dhss': True},
               {'share_type': False, 'share_net': False, 'dhss': True},
@@ -3281,6 +3367,7 @@ class ShareAPITestCase(test.TestCase):
         self.mock_object(db_api, 'share_update')
         self.mock_object(db_api, 'service_get_by_args',
                          mock.Mock(return_value=service))
+        self.mock_object(share_api.API, '_modify_quotas_for_share_migration')
 
         if share_type:
             self.api.migration_start(self.context, share, host, False, True,
@@ -3306,6 +3393,9 @@ class ShareAPITestCase(test.TestCase):
         db_api.share_instance_update.assert_called_once_with(
             self.context, share.instance['id'],
             {'status': constants.STATUS_MIGRATING})
+        if share_type:
+            (share_api.API._modify_quotas_for_share_migration.
+                assert_called_once_with(self.context, share, fake_type_2))
 
     def test_migration_start_with_new_share_type_limit(self):
         host = 'fake2@backend#pool'
@@ -3363,6 +3453,7 @@ class ShareAPITestCase(test.TestCase):
         self.mock_object(db_api, 'share_update')
         self.mock_object(db_api, 'service_get_by_args',
                          mock.Mock(return_value=service))
+        self.mock_object(share_api.API, '_modify_quotas_for_share_migration')
 
         self.assertRaises(exception.InvalidShare,
                           self.api.migration_start,
@@ -3429,6 +3520,7 @@ class ShareAPITestCase(test.TestCase):
             host='fake@backend#pool', share_type_id=fake_type['id'])
 
         self.mock_object(utils, 'validate_service_host')
+        self.mock_object(share_api.API, '_modify_quotas_for_share_migration')
 
         self.assertRaises(
             exception.InvalidInput, self.api.migration_start, self.context,
@@ -5885,6 +5885,9 @@ class ShareManagerTestCase(test.TestCase):
         self.mock_object(self.share_manager, '_migration_delete_instance')
         self.mock_object(migration_api.ShareMigrationHelper,
                          'apply_new_access_rules')
+        self.mock_object(
+            share_types,
+            'revert_allocated_share_type_quotas_during_migration')
         self.mock_object(
             self.share_manager.db,
             'share_snapshot_instance_get_all_with_filters',
@@ -5943,6 +5946,11 @@ class ShareManagerTestCase(test.TestCase):
             el_create.assert_called_once_with(self.context, el)
         else:
             el_create.assert_not_called()
+        (share_types.
+            revert_allocated_share_type_quotas_during_migration.
+            assert_called_once_with(
+                self.context, dest_instance, src_instance['share_type_id'],
+                allow_deallocate_from_current_type=True))
 
     def test__migration_complete_host_assisted(self):
 
@@ -6017,6 +6025,9 @@ class ShareManagerTestCase(test.TestCase):
         self.mock_object(self.share_manager.driver, 'migration_cancel')
         self.mock_object(helper, 'cleanup_new_instance')
         self.mock_object(self.share_manager, '_reset_read_only_access_rules')
+        self.mock_object(
+            share_types,
+            'revert_allocated_share_type_quotas_during_migration')
 
         self.share_manager.migration_cancel(
             self.context, instance_1['id'], instance_2['id'])
@@ -6062,6 +6073,9 @@ class ShareManagerTestCase(test.TestCase):
 
         self.share_manager.db.share_instance_update.assert_has_calls(
             share_instance_update_calls)
+        (share_types.revert_allocated_share_type_quotas_during_migration.
+            assert_called_once_with(
+                self.context, instance_1, instance_2['share_type_id']))
 
     @ddt.data(True, False)
     def test__reset_read_only_access_rules(self, supress_errors):
@@ -29,8 +29,10 @@ from manila.common import constants
 from manila import context
 from manila import db
 from manila import exception
+from manila import quota
 from manila.share import share_types
 from manila import test
+from manila.tests import db_utils
 
 
 def create_share_type_dict(extra_specs=None):
@@ -581,3 +583,56 @@ class ShareTypesTestCase(test.TestCase):
         share_types.provision_filter_on_size(self.context, type4, "24")
         share_types.provision_filter_on_size(self.context, type4, "99")
         share_types.provision_filter_on_size(self.context, type4, "30")
+
+    @ddt.data(True, False)
+    def test__revert_allocated_share_type_quotas_during_migration(
+            self, failed_on_reservation):
+        fake_type_id = 'fake_1'
+        extra_specs = {'replication_type': 'readable'}
+        source_instance = db_utils.create_share()
+        dest_instance = db_utils.create_share(
+            share_type_id=fake_type_id)
+        share_type = {
+            'name': 'fake_share_type',
+            'extra_specs': extra_specs,
+            'is_public': True,
+            'id': 'fake_type_id'
+        }
+
+        expected_deltas = {
+            'project_id': dest_instance['project_id'],
+            'user_id': dest_instance['user_id'],
+            'share_replicas': -1,
+            'replica_gigabytes': -dest_instance['size'],
+            'share_type_id': share_type['id'],
+            'shares': -1,
+            'gigabytes': -dest_instance['size'],
+        }
+        reservations = 'reservations'
+        reservation_action = (
+            mock.Mock(side_effect=exception.ManilaException(message='fake'))
+            if failed_on_reservation else mock.Mock(return_value=reservations))
+
+        mock_type_get = self.mock_object(
+            share_types, 'get_share_type', mock.Mock(return_value=share_type))
+        mock_reserve = self.mock_object(
+            quota.QUOTAS, 'reserve', reservation_action)
+        mock_commit = self.mock_object(quota.QUOTAS, 'commit')
+        mock_log = self.mock_object(share_types.LOG, 'exception')
+
+        share_types.revert_allocated_share_type_quotas_during_migration(
+            self.context, source_instance, share_type['id'], dest_instance)
+
+        if not failed_on_reservation:
+            mock_commit.assert_called_once_with(
+                self.context, reservations,
+                project_id=dest_instance['project_id'],
+                user_id=dest_instance['user_id'],
+                share_type_id=share_type['id'])
+        else:
+            mock_log.assert_called_once()
+
+        mock_type_get.assert_called_once_with(
+            self.context, share_type['id'])
+        mock_reserve.assert_called_once_with(
+            self.context, **expected_deltas)
@@ -0,0 +1,7 @@
+---
+fixes:
+  - |
+    Fixed the issue of not accounting for replica quotas while triggering the
+    migration of a given share. Please refer to the
+    `Launchpad bug #1910752 <https://bugs.launchpad.net/manila/+bug/1910752>`_
+    for more details.