Change misleading user message when share services are down

Enhance the user message shown when all of the share manager services
are down or are still initializing. Also removed a duplicate test:
test_create_share_non_admin.

Closes-Bug: #1886690
Change-Id: I168564a5b054d17762ad668ebbe4f5e7b562197b

parent b24ef91f2c
commit ecda09a14d
@@ -148,6 +148,12 @@ class Detail(object):
           "increase the network port quotas or free up some ports and retry. "
           "If this doesn't work, contact your administrator to troubleshoot "
           "issues with your network."))
+    SHARE_BACKEND_NOT_READY_YET = (
+        '028',
+        _("No storage could be allocated for this share "
+          "request. Share back end services are not "
+          "ready yet. Contact your administrator in case "
+          "retrying does not help."))
 
     ALL = (
         UNKNOWN_ERROR,
@@ -177,6 +183,7 @@ class Detail(object):
         MISSING_SECURITY_SERVICE,
         DRIVER_FAILED_TRANSFER_ACCEPT,
         SHARE_NETWORK_PORT_QUOTA_LIMIT_EXCEEDED,
+        SHARE_BACKEND_NOT_READY_YET,
     )
 
     # Exception and detail mappings
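The Detail entries above follow a simple convention: each one is a tuple of a stable ID and a translatable user-facing string, and ALL acts as the registry that other code validates against. Below is a minimal, self-contained sketch of that pattern; it is an illustration only, not manila's actual implementation, and the translate_detail() helper plus the trimmed-down registry are assumptions made for the example.

# Sketch of the (ID, text) detail pattern used above -- not manila code.
from gettext import gettext as _


class Detail(object):
    SHARE_BACKEND_NOT_READY_YET = (
        '028',
        _("No storage could be allocated for this share "
          "request. Share back end services are not "
          "ready yet. Contact your administrator in case "
          "retrying does not help."))

    ALL = (
        SHARE_BACKEND_NOT_READY_YET,
    )


def translate_detail(detail_id):
    # Resolve a stored detail ID back to its user-facing message text.
    for detail in Detail.ALL:
        if detail[0] == detail_id:
            return detail[1]
    return None


# Only the ID ('028') is persisted with the failed request; the API layer
# resolves it to the descriptive text when the user lists their messages.
print(translate_detail('028'))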
@@ -241,6 +241,20 @@ class FilterScheduler(base.Scheduler):
         # Note: remember, we are using an iterator here. So only
         # traverse this list once.
         hosts = self.host_manager.get_all_host_states_share(elevated)
+        if not hosts:
+            msg = _("No storage could be allocated for this share "
+                    "request. Share back end services are not "
+                    "ready yet. Contact your administrator in case "
+                    "retrying does not help.")
+            LOG.error(msg)
+            self.message_api.create(
+                context,
+                message_field.Action.CREATE,
+                context.project_id,
+                resource_type=message_field.Resource.SHARE,
+                resource_id=request_spec.get('share_id', None),
+                detail=message_field.Detail.SHARE_BACKEND_NOT_READY_YET)
+            raise exception.WillNotSchedule(msg)
 
         # Filter local hosts based on requirements ...
         hosts, last_filter = self.host_manager.get_filtered_hosts(
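The scheduler change boils down to a guard clause: if get_all_host_states_share() returns nothing (all share manager services are down or still initializing), record a user-visible message and raise WillNotSchedule instead of letting the request fall through to a generic failure. Below is a simplified, self-contained sketch of that flow; the message_api.create() signature and the dict-based context are condensed stand-ins for illustration and do not match manila's real interfaces.

# Simplified sketch of the guard added above -- not the real FilterScheduler.
class WillNotSchedule(Exception):
    """Raised when scheduling is refused up front rather than retried."""


def schedule_create_share(context, request_spec, hosts, message_api):
    # With no back-end hosts reported yet, persist a user-visible message
    # explaining why, then refuse to schedule.
    if not hosts:
        msg = ("No storage could be allocated for this share request. "
               "Share back end services are not ready yet. "
               "Contact your administrator in case retrying does not help.")
        message_api.create(context,
                           action='CREATE',
                           project_id=context.get('project_id'),
                           resource_id=request_spec.get('share_id'),
                           detail='SHARE_BACKEND_NOT_READY_YET')
        raise WillNotSchedule(msg)
    # ... filtering and weighing would continue here ...
    return hosts[0]


class _FakeMessageApi(object):
    def create(self, context, **kwargs):
        print("user message recorded:", kwargs)


try:
    schedule_create_share({'project_id': 'demo'}, {'share_id': 'fake-id1'},
                          hosts=[], message_api=_FakeMessageApi())
except WillNotSchedule as exc:
    print("scheduling refused:", exc)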
@@ -145,43 +145,28 @@ class FilterSchedulerTestCase(test_base.SchedulerTestCase):
         self.assertDictEqual(fake_type, retval['resource_type'])
 
     def test_create_share_no_hosts(self):
-        # Ensure empty hosts/child_zones result in NoValidHosts exception.
+        # Ensure empty hosts/child_zones result in WillNotSchedule exception.
         sched = fakes.FakeFilterScheduler()
         fake_context = context.RequestContext('user', 'project')
+        create_mock_message = self.mock_object(sched.message_api, 'create')
         request_spec = {
             'share_properties': {'project_id': 1, 'size': 1},
             'share_instance_properties': {},
             'share_type': {'name': 'NFS'},
             'share_id': 'fake-id1',
         }
-        self.assertRaises(exception.NoValidHost, sched.schedule_create_share,
-                          fake_context, request_spec, {})
-
-    @mock.patch('manila.scheduler.host_manager.HostManager.'
-                'get_all_host_states_share')
-    def test_create_share_non_admin(self, _mock_get_all_host_states):
-        # Test creating a volume locally using create_volume, passing
-        # a non-admin context. DB actions should work.
-        self.was_admin = False
-
-        def fake_get(context, *args, **kwargs):
-            # Make sure this is called with admin context, even though
-            # we're using user context below.
-            self.was_admin = context.is_admin
-            return {}
-
-        sched = fakes.FakeFilterScheduler()
-        _mock_get_all_host_states.side_effect = fake_get
-        fake_context = context.RequestContext('user', 'project')
-        request_spec = {
-            'share_properties': {'project_id': 1, 'size': 1},
-            'share_instance_properties': {},
-            'share_type': {'name': 'NFS'},
-            'share_id': 'fake-id1',
-        }
-        self.assertRaises(exception.NoValidHost, sched.schedule_create_share,
-                          fake_context, request_spec, {})
-        self.assertTrue(self.was_admin)
+        self.assertRaises(exception.WillNotSchedule,
+                          sched.schedule_create_share,
+                          fake_context,
+                          request_spec,
+                          {})
+        create_mock_message.assert_called_once_with(
+            fake_context,
+            message_field.Action.CREATE,
+            fake_context.project_id,
+            resource_type=message_field.Resource.SHARE,
+            resource_id=request_spec.get('share_id', None),
+            detail=message_field.Detail.SHARE_BACKEND_NOT_READY_YET)
 
     @ddt.data(
         {'name': 'foo'},
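The reworked test stubs out the message API and then verifies the exact call. For readers less familiar with that pattern, here is a standalone illustration using plain unittest.mock rather than manila's self.mock_object() helper; the notify_backend_not_ready() function and its arguments are made up for the example.

# Standalone illustration of the stub-and-assert pattern used in the test.
from unittest import mock


def notify_backend_not_ready(message_api, context, share_id):
    # Hypothetical helper standing in for the scheduler's error path.
    message_api.create(context, 'CREATE', resource_id=share_id,
                       detail='SHARE_BACKEND_NOT_READY_YET')


fake_message_api = mock.Mock()
notify_backend_not_ready(fake_message_api, {'user': 'demo'}, 'fake-id1')

# Raises AssertionError with a readable diff if the call count or the
# arguments differ from what the code under test actually did.
fake_message_api.create.assert_called_once_with(
    {'user': 'demo'}, 'CREATE', resource_id='fake-id1',
    detail='SHARE_BACKEND_NOT_READY_YET')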
@@ -420,10 +405,13 @@ class FilterSchedulerTestCase(test_base.SchedulerTestCase):
         self.assertRaises(exception.InvalidParameterValue,
                           fakes.FakeFilterScheduler)
 
-    def test_retry_disabled(self):
+    @mock.patch('manila.scheduler.host_manager.HostManager.'
+                'get_all_host_states_share')
+    def test_retry_disabled(self, _mock_get_all_host_states):
         # Retry info should not get populated when re-scheduling is off.
         self.flags(scheduler_max_attempts=1)
         sched = fakes.FakeFilterScheduler()
+        sched.host_manager = fakes.FakeHostManager()
         request_spec = {
             'share_type': {'name': 'iSCSI'},
             'share_properties': {'project_id': 1, 'size': 1},
@@ -436,10 +424,13 @@ class FilterSchedulerTestCase(test_base.SchedulerTestCase):
         # Should not have retry info in the populated filter properties.
         self.assertNotIn("retry", filter_properties)
 
-    def test_retry_attempt_one(self):
+    @mock.patch('manila.scheduler.host_manager.HostManager.'
+                'get_all_host_states_share')
+    def test_retry_attempt_one(self, _mock_get_all_host_states):
         # Test retry logic on initial scheduling attempt.
         self.flags(scheduler_max_attempts=2)
         sched = fakes.FakeFilterScheduler()
+        sched.host_manager = fakes.FakeHostManager()
         request_spec = {
             'share_type': {'name': 'iSCSI'},
             'share_properties': {'project_id': 1, 'size': 1},
@@ -452,10 +443,13 @@ class FilterSchedulerTestCase(test_base.SchedulerTestCase):
         num_attempts = filter_properties['retry']['num_attempts']
         self.assertEqual(1, num_attempts)
 
-    def test_retry_attempt_two(self):
+    @mock.patch('manila.scheduler.host_manager.HostManager.'
+                'get_all_host_states_share')
+    def test_retry_attempt_two(self, _mock_get_all_host_states):
         # Test retry logic when re-scheduling.
         self.flags(scheduler_max_attempts=2)
         sched = fakes.FakeFilterScheduler()
+        sched.host_manager = fakes.FakeHostManager()
         request_spec = {
             'share_type': {'name': 'iSCSI'},
             'share_properties': {'project_id': 1, 'size': 1},
@@ -643,7 +637,7 @@ class FilterSchedulerTestCase(test_base.SchedulerTestCase):
         self.mock_object(sched.host_manager, 'get_filtered_hosts',
                          mock.Mock(return_value=(None, 'filter')))
 
-        self.assertRaises(exception.NoValidHost,
+        self.assertRaises(exception.WillNotSchedule,
                           sched.schedule_create_replica,
                           self.context, request_spec, {})
 
@@ -0,0 +1,6 @@
+---
+fixes:
+  - |
+    Fixed `bug #1886690 <https://bugs.launchpad.net/manila/+bug/1886690>`_
+    that was a misleading user message when share services are down. The
+    message is now clear and descriptive.