Merge "Cosmetic changes to scheduler"
commit 38b54d26f3
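The change is a mechanical rename of host-oriented names in the scheduler to backend-oriented ones, with backward-compatible aliases kept for out-of-tree code. As a rough orientation, the aliasing pattern used in the scheduler driver looks like this condensed sketch (simplified from the hunks below, not the complete Cinder class):

class Scheduler(object):
    def host_passes_filters(self, context, backend, request_spec,
                            filter_properties):
        """Check if the specified backend passes the filters."""
        raise NotImplementedError("Must implement backend_passes_filters")

    # The old method name is kept so out-of-tree schedulers keep working;
    # the backend-oriented name is just an alias to the same function.
    backend_passes_filters = host_passes_filters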

@@ -574,8 +574,8 @@ class PasteAppNotFound(NotFound):
message = _("Could not load paste app '%(name)s' from %(path)s")
class NoValidHost(CinderException):
message = _("No valid host was found. %(reason)s")
class NoValidBackend(CinderException):
message = _("No valid backend was found. %(reason)s")
class NoMoreTargets(CinderException):

@@ -115,15 +115,21 @@ class Scheduler(object):
host,
capabilities)
def host_passes_filters(self, context, host, request_spec,
def host_passes_filters(self, context, backend, request_spec,
filter_properties):
"""Check if the specified host passes the filters."""
raise NotImplementedError(_("Must implement host_passes_filters"))
"""Check if the specified backend passes the filters."""
raise NotImplementedError(_("Must implement backend_passes_filters"))
def find_retype_host(self, context, request_spec, filter_properties=None,
migration_policy='never'):
"""Find a host that can accept the volume with its new type."""
raise NotImplementedError(_("Must implement find_retype_host"))
"""Find a backend that can accept the volume with its new type."""
raise NotImplementedError(_("Must implement find_retype_backend"))
# NOTE(geguileo): For backward compatibility with out of tree Schedulers
# we don't change host_passes_filters or find_retype_host method names but
# create an "alias" for them with the right name instead.
backend_passes_filters = host_passes_filters
find_retype_backend = find_retype_host
def schedule(self, context, topic, method, *_args, **_kwargs):
"""Must override schedule method for scheduler to work."""

@@ -66,15 +66,16 @@ class FilterScheduler(driver.Scheduler):
request_spec_list,
filter_properties_list):
weighed_host = self._schedule_group(
weighed_backend = self._schedule_group(
context,
request_spec_list,
filter_properties_list)
if not weighed_host:
raise exception.NoValidHost(reason=_("No weighed hosts available"))
if not weighed_backend:
raise exception.NoValidBackend(reason=_("No weighed backends "
"available"))
backend = weighed_host.obj
backend = weighed_backend.obj
updated_group = driver.group_update_db(context, group, backend.host,
backend.cluster_name)

@@ -85,17 +86,18 @@ class FilterScheduler(driver.Scheduler):
request_spec_list,
group_filter_properties,
filter_properties_list):
weighed_host = self._schedule_generic_group(
weighed_backend = self._schedule_generic_group(
context,
group_spec,
request_spec_list,
group_filter_properties,
filter_properties_list)
if not weighed_host:
raise exception.NoValidHost(reason=_("No weighed hosts available"))
if not weighed_backend:
raise exception.NoValidBackend(reason=_("No weighed backends "
"available"))
backend = weighed_host.obj
backend = weighed_backend.obj
updated_group = driver.generic_group_update_db(context, group,
backend.host,

@@ -104,13 +106,13 @@ class FilterScheduler(driver.Scheduler):
self.volume_rpcapi.create_group(context, updated_group)
def schedule_create_volume(self, context, request_spec, filter_properties):
weighed_host = self._schedule(context, request_spec,
filter_properties)
backend = self._schedule(context, request_spec, filter_properties)
if not weighed_host:
raise exception.NoValidHost(reason=_("No weighed hosts available"))
if not backend:
raise exception.NoValidBackend(reason=_("No weighed backends "
"available"))
backend = weighed_host.obj
backend = backend.obj
volume_id = request_spec['volume_id']
updated_volume = driver.volume_update_db(context, volume_id,

@@ -126,25 +128,25 @@ class FilterScheduler(driver.Scheduler):
filter_properties,
allow_reschedule=True)
def host_passes_filters(self, context, host, request_spec,
filter_properties):
"""Check if the specified host passes the filters."""
weighed_hosts = self._get_weighted_candidates(context, request_spec,
filter_properties)
for weighed_host in weighed_hosts:
host_state = weighed_host.obj
if host_state.backend_id == host:
return host_state
def backend_passes_filters(self, context, backend, request_spec,
filter_properties):
"""Check if the specified backend passes the filters."""
weighed_backends = self._get_weighted_candidates(context, request_spec,
filter_properties)
for weighed_backend in weighed_backends:
backend_state = weighed_backend.obj
if backend_state.backend_id == backend:
return backend_state
volume_id = request_spec.get('volume_id', '??volume_id missing??')
raise exception.NoValidHost(reason=_('Cannot place volume %(id)s on '
'%(host)s') %
{'id': volume_id,
'host': host})
raise exception.NoValidBackend(reason=_('Cannot place volume %(id)s '
'on %(backend)s') %
{'id': volume_id,
'backend': backend})
def find_retype_host(self, context, request_spec, filter_properties=None,
migration_policy='never'):
"""Find a host that can accept the volume with its new type."""
def find_retype_backend(self, context, request_spec,
filter_properties=None, migration_policy='never'):
"""Find a backend that can accept the volume with its new type."""
filter_properties = filter_properties or {}
backend = (request_spec['volume_properties'].get('cluster_name')
or request_spec['volume_properties']['host'])

@@ -156,10 +158,10 @@ class FilterScheduler(driver.Scheduler):
weighed_backends = self._get_weighted_candidates(context, request_spec,
filter_properties)
if not weighed_backends:
raise exception.NoValidHost(reason=_('No valid hosts for volume '
'%(id)s with type %(type)s') %
{'id': request_spec['volume_id'],
'type': request_spec['volume_type']})
raise exception.NoValidBackend(
reason=_('No valid backends for volume %(id)s with type '
'%(type)s') % {'id': request_spec['volume_id'],
'type': request_spec['volume_type']})
for weighed_backend in weighed_backends:
backend_state = weighed_backend.obj

@@ -183,31 +185,30 @@ class FilterScheduler(driver.Scheduler):
return backend_state
if migration_policy == 'never':
raise exception.NoValidHost(reason=_('Current host not valid for '
'volume %(id)s with type '
'%(type)s, migration not '
'allowed') %
{'id': request_spec['volume_id'],
'type': request_spec['volume_type']})
raise exception.NoValidBackend(
reason=_('Current backend not valid for volume %(id)s with '
'type %(type)s, migration not allowed') %
{'id': request_spec['volume_id'],
'type': request_spec['volume_type']})
top_host = self._choose_top_host(weighed_backends, request_spec)
return top_host.obj
top_backend = self._choose_top_backend(weighed_backends, request_spec)
return top_backend.obj
def get_pools(self, context, filters):
# TODO(zhiteng) Add filters support
return self.host_manager.get_pools(context)
def _post_select_populate_filter_properties(self, filter_properties,
host_state):
backend_state):
"""Populate filter properties with additional information.
Add additional information to the filter properties after a host has
Add additional information to the filter properties after a backend has
been selected by the scheduling process.
"""
# Add a retry entry for the selected volume backend:
self._add_retry_host(filter_properties, host_state.backend_id)
self._add_retry_backend(filter_properties, backend_state.backend_id)
def _add_retry_host(self, filter_properties, host):
def _add_retry_backend(self, filter_properties, backend):
"""Add a retry entry for the selected volume backend.
In the event that the request gets re-scheduled, this entry will signal

@@ -216,8 +217,11 @@ class FilterScheduler(driver.Scheduler):
retry = filter_properties.get('retry', None)
if not retry:
return
hosts = retry['hosts']
hosts.append(host)
# TODO(geguileo): In P - change to only use backends
for key in ('hosts', 'backends'):
backends = retry.get(key)
if backends is not None:
backends.append(backend)
def _max_attempts(self):
max_attempts = CONF.scheduler_max_attempts

@@ -233,21 +237,22 @@ class FilterScheduler(driver.Scheduler):
if not exc:
return  # no exception info from a previous attempt, skip
hosts = retry.get('hosts', None)
if not hosts:
# TODO(geguileo): In P - change to hosts = retry.get('backends')
backends = retry.get('backends', retry.get('hosts'))
if not backends:
return  # no previously attempted hosts, skip
last_host = hosts[-1]
last_backend = backends[-1]
LOG.error(_LE("Error scheduling %(volume_id)s from last vol-service: "
"%(last_host)s : %(exc)s"),
"%(last_backend)s : %(exc)s"),
{'volume_id': volume_id,
'last_host': last_host,
'last_backend': last_backend,
'exc': exc})
def _populate_retry(self, filter_properties, properties):
"""Populate filter properties with history of retries for request.
If maximum retries is exceeded, raise NoValidHost.
If maximum retries is exceeded, raise NoValidBackend.
"""
max_attempts = self.max_attempts
retry = filter_properties.pop('retry', {})

@@ -262,7 +267,8 @@ class FilterScheduler(driver.Scheduler):
else:
retry = {
'num_attempts': 1,
'hosts': []  # list of volume service hosts tried
'backends': [],  # list of volume service backends tried
'hosts': []  # TODO(geguileo): Remove in P and leave backends
}
filter_properties['retry'] = retry

@@ -270,7 +276,7 @@ class FilterScheduler(driver.Scheduler):
self._log_volume_error(volume_id, retry)
if retry['num_attempts'] > max_attempts:
raise exception.NoValidHost(
raise exception.NoValidBackend(
reason=_("Exceeded max scheduling attempts %(max_attempts)d "
"for volume %(volume_id)s") %
{'max_attempts': max_attempts,

@@ -278,7 +284,7 @@ class FilterScheduler(driver.Scheduler):
def _get_weighted_candidates(self, context, request_spec,
filter_properties=None):
"""Return a list of hosts that meet required specs.
"""Return a list of backends that meet required specs.
Returned list is ordered by their fitness.
"""

@@ -320,26 +326,26 @@ class FilterScheduler(driver.Scheduler):
resource_type['extra_specs'].update(
multiattach='<is> True')
# Find our local list of acceptable hosts by filtering and
# Find our local list of acceptable backends by filtering and
# weighing our options. we virtually consume resources on
# it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only
# traverse this list once.
hosts = self.host_manager.get_all_host_states(elevated)
backends = self.host_manager.get_all_backend_states(elevated)
# Filter local hosts based on requirements ...
hosts = self.host_manager.get_filtered_hosts(hosts,
filter_properties)
if not hosts:
backends = self.host_manager.get_filtered_backends(backends,
filter_properties)
if not backends:
return []
LOG.debug("Filtered %s", hosts)
# weighted_host = WeightedHost() ... the best
# host for the job.
weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
filter_properties)
return weighed_hosts
LOG.debug("Filtered %s", backends)
# weighted_backends = WeightedHost() ... the best
# backend for the job.
weighed_backends = self.host_manager.get_weighed_backends(
backends, filter_properties)
return weighed_backends
def _get_weighted_candidates_group(self, context, request_spec_list,
filter_properties_list=None):

@@ -350,7 +356,7 @@ class FilterScheduler(driver.Scheduler):
"""
elevated = context.elevated()
weighed_hosts = []
weighed_backends = []
index = 0
for request_spec in request_spec_list:
volume_properties = request_spec['volume_properties']

@@ -388,67 +394,67 @@ class FilterScheduler(driver.Scheduler):
self.populate_filter_properties(request_spec,
filter_properties)
# Find our local list of acceptable hosts by filtering and
# Find our local list of acceptable backends by filtering and
# weighing our options. we virtually consume resources on
# it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only
# traverse this list once.
all_hosts = self.host_manager.get_all_host_states(elevated)
if not all_hosts:
all_backends = self.host_manager.get_all_backend_states(elevated)
if not all_backends:
return []
# Filter local hosts based on requirements ...
hosts = self.host_manager.get_filtered_hosts(all_hosts,
filter_properties)
# Filter local backends based on requirements ...
backends = self.host_manager.get_filtered_backends(
all_backends, filter_properties)
if not hosts:
if not backends:
return []
LOG.debug("Filtered %s", hosts)
LOG.debug("Filtered %s", backends)
# weighted_host = WeightedHost() ... the best
# host for the job.
temp_weighed_hosts = self.host_manager.get_weighed_hosts(
hosts,
temp_weighed_backends = self.host_manager.get_weighed_backends(
backends,
filter_properties)
if not temp_weighed_hosts:
if not temp_weighed_backends:
return []
if index == 0:
weighed_hosts = temp_weighed_hosts
weighed_backends = temp_weighed_backends
else:
new_weighed_hosts = []
for host1 in weighed_hosts:
for host2 in temp_weighed_hosts:
new_weighed_backends = []
for backend1 in weighed_backends:
for backend2 in temp_weighed_backends:
# Should schedule creation of CG on backend level,
# not pool level.
if (utils.extract_host(host1.obj.backend_id) ==
utils.extract_host(host2.obj.backend_id)):
new_weighed_hosts.append(host1)
weighed_hosts = new_weighed_hosts
if not weighed_hosts:
if (utils.extract_host(backend1.obj.backend_id) ==
utils.extract_host(backend2.obj.backend_id)):
new_weighed_backends.append(backend1)
weighed_backends = new_weighed_backends
if not weighed_backends:
return []
index += 1
return weighed_hosts
return weighed_backends
def _get_weighted_candidates_generic_group(
self, context, group_spec, request_spec_list,
group_filter_properties=None,
filter_properties_list=None):
"""Finds hosts that supports the group.
"""Finds backends that supports the group.
Returns a list of hosts that meet the required specs,
Returns a list of backends that meet the required specs,
ordered by their fitness.
"""
elevated = context.elevated()
hosts_by_group_type = self._get_weighted_candidates_by_group_type(
backends_by_group_type = self._get_weighted_candidates_by_group_type(
context, group_spec, group_filter_properties)
weighed_hosts = []
hosts_by_vol_type = []
weighed_backends = []
backends_by_vol_type = []
index = 0
for request_spec in request_spec_list:
volume_properties = request_spec['volume_properties']

@@ -486,72 +492,72 @@ class FilterScheduler(driver.Scheduler):
self.populate_filter_properties(request_spec,
filter_properties)
# Find our local list of acceptable hosts by filtering and
# Find our local list of acceptable backends by filtering and
# weighing our options. we virtually consume resources on
# it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only
# traverse this list once.
all_hosts = self.host_manager.get_all_host_states(elevated)
if not all_hosts:
all_backends = self.host_manager.get_all_backend_states(elevated)
if not all_backends:
return []
# Filter local hosts based on requirements ...
hosts = self.host_manager.get_filtered_hosts(all_hosts,
filter_properties)
# Filter local backends based on requirements ...
backends = self.host_manager.get_filtered_backends(
all_backends, filter_properties)
if not hosts:
if not backends:
return []
LOG.debug("Filtered %s", hosts)
LOG.debug("Filtered %s", backends)
# weighted_host = WeightedHost() ... the best
# host for the job.
temp_weighed_hosts = self.host_manager.get_weighed_hosts(
hosts,
# weighted_backend = WeightedHost() ... the best
# backend for the job.
temp_weighed_backends = self.host_manager.get_weighed_backends(
backends,
filter_properties)
if not temp_weighed_hosts:
if not temp_weighed_backends:
return []
if index == 0:
hosts_by_vol_type = temp_weighed_hosts
backends_by_vol_type = temp_weighed_backends
else:
hosts_by_vol_type = self._find_valid_hosts(
hosts_by_vol_type, temp_weighed_hosts)
if not hosts_by_vol_type:
backends_by_vol_type = self._find_valid_backends(
backends_by_vol_type, temp_weighed_backends)
if not backends_by_vol_type:
return []
index += 1
# Find hosts selected by both the group type and volume types.
weighed_hosts = self._find_valid_hosts(hosts_by_vol_type,
hosts_by_group_type)
# Find backends selected by both the group type and volume types.
weighed_backends = self._find_valid_backends(backends_by_vol_type,
backends_by_group_type)
return weighed_hosts
return weighed_backends
def _find_valid_hosts(self, host_list1, host_list2):
new_hosts = []
for host1 in host_list1:
for host2 in host_list2:
def _find_valid_backends(self, backend_list1, backend_list2):
new_backends = []
for backend1 in backend_list1:
for backend2 in backend_list2:
# Should schedule creation of group on backend level,
# not pool level.
if (utils.extract_host(host1.obj.backend_id) ==
utils.extract_host(host2.obj.backend_id)):
new_hosts.append(host1)
if not new_hosts:
if (utils.extract_host(backend1.obj.backend_id) ==
utils.extract_host(backend2.obj.backend_id)):
new_backends.append(backend1)
if not new_backends:
return []
return new_hosts
return new_backends
def _get_weighted_candidates_by_group_type(
self, context, group_spec,
group_filter_properties=None):
"""Finds hosts that supports the group type.
"""Finds backends that supports the group type.
Returns a list of hosts that meet the required specs,
Returns a list of backends that meet the required specs,
ordered by their fitness.
"""
elevated = context.elevated()
weighed_hosts = []
weighed_backends = []
volume_properties = group_spec['volume_properties']
# Since Cinder is using mixed filters from Oslo and it's own, which
# takes 'resource_XX' and 'volume_XX' as input respectively,

@@ -577,97 +583,97 @@ class FilterScheduler(driver.Scheduler):
self.populate_filter_properties(group_spec,
group_filter_properties)
# Find our local list of acceptable hosts by filtering and
# Find our local list of acceptable backends by filtering and
# weighing our options. we virtually consume resources on
# it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only
# traverse this list once.
all_hosts = self.host_manager.get_all_host_states(elevated)
if not all_hosts:
all_backends = self.host_manager.get_all_backend_states(elevated)
if not all_backends:
return []
# Filter local hosts based on requirements ...
hosts = self.host_manager.get_filtered_hosts(all_hosts,
group_filter_properties)
# Filter local backends based on requirements ...
backends = self.host_manager.get_filtered_backends(
all_backends, group_filter_properties)
if not hosts:
if not backends:
return []
LOG.debug("Filtered %s", hosts)
LOG.debug("Filtered %s", backends)
# weighted_host = WeightedHost() ... the best
# host for the job.
weighed_hosts = self.host_manager.get_weighed_hosts(
hosts,
# weighted_backends = WeightedHost() ... the best backend for the job.
weighed_backends = self.host_manager.get_weighed_backends(
backends,
group_filter_properties)
if not weighed_hosts:
if not weighed_backends:
return []
return weighed_hosts
return weighed_backends
def _schedule(self, context, request_spec, filter_properties=None):
weighed_hosts = self._get_weighted_candidates(context, request_spec,
filter_properties)
# When we get the weighed_hosts, we clear those hosts whose backend
# is not same as consistencygroup's backend.
weighed_backends = self._get_weighted_candidates(context, request_spec,
filter_properties)
# When we get the weighed_backends, we clear those backends that don't
# match the consistencygroup's backend.
if request_spec.get('CG_backend'):
group_backend = request_spec.get('CG_backend')
else:
group_backend = request_spec.get('group_backend')
if weighed_hosts and group_backend:
if weighed_backends and group_backend:
# Get host name including host@backend#pool info from
# weighed_hosts.
for host in weighed_hosts[::-1]:
backend = utils.extract_host(host.obj.backend_id)
if backend != group_backend:
weighed_hosts.remove(host)
if not weighed_hosts:
LOG.warning(_LW('No weighed hosts found for volume '
# weighed_backends.
for backend in weighed_backends[::-1]:
backend_id = utils.extract_host(backend.obj.backend_id)
if backend_id != group_backend:
weighed_backends.remove(backend)
if not weighed_backends:
LOG.warning(_LW('No weighed backend found for volume '
'with properties: %s'),
filter_properties['request_spec'].get('volume_type'))
return None
return self._choose_top_host(weighed_hosts, request_spec)
return self._choose_top_backend(weighed_backends, request_spec)
def _schedule_group(self, context, request_spec_list,
filter_properties_list=None):
weighed_hosts = self._get_weighted_candidates_group(
weighed_backends = self._get_weighted_candidates_group(
context,
request_spec_list,
filter_properties_list)
if not weighed_hosts:
if not weighed_backends:
return None
return self._choose_top_host_group(weighed_hosts, request_spec_list)
return self._choose_top_backend_group(weighed_backends,
request_spec_list)
def _schedule_generic_group(self, context, group_spec, request_spec_list,
group_filter_properties=None,
filter_properties_list=None):
weighed_hosts = self._get_weighted_candidates_generic_group(
weighed_backends = self._get_weighted_candidates_generic_group(
context,
group_spec,
request_spec_list,
group_filter_properties,
filter_properties_list)
if not weighed_hosts:
if not weighed_backends:
return None
return self._choose_top_host_generic_group(weighed_hosts)
return self._choose_top_backend_generic_group(weighed_backends)
def _choose_top_host(self, weighed_hosts, request_spec):
top_host = weighed_hosts[0]
host_state = top_host.obj
LOG.debug("Choosing %s", host_state.backend_id)
def _choose_top_backend(self, weighed_backends, request_spec):
top_backend = weighed_backends[0]
backend_state = top_backend.obj
LOG.debug("Choosing %s", backend_state.backend_id)
volume_properties = request_spec['volume_properties']
host_state.consume_from_volume(volume_properties)
return top_host
backend_state.consume_from_volume(volume_properties)
return top_backend
def _choose_top_host_group(self, weighed_hosts, request_spec_list):
top_host = weighed_hosts[0]
host_state = top_host.obj
LOG.debug("Choosing %s", host_state.backend_id)
return top_host
def _choose_top_backend_group(self, weighed_backends, request_spec_list):
top_backend = weighed_backends[0]
backend_state = top_backend.obj
LOG.debug("Choosing %s", backend_state.backend_id)
return top_backend
def _choose_top_host_generic_group(self, weighed_hosts):
top_host = weighed_hosts[0]
host_state = top_host.obj
LOG.debug("Choosing %s", host_state.backend_id)
return top_host
def _choose_top_backend_generic_group(self, weighed_backends):
top_backend = weighed_backends[0]
backend_state = top_backend.obj
LOG.debug("Choosing %s", backend_state.backend_id)
return top_backend
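The retry bookkeeping above (_populate_retry and _add_retry_backend) keeps the legacy 'hosts' key and the new 'backends' key in step during the transition. A standalone sketch of that behaviour (the helper below is illustrative, not the Cinder method itself; the dictionary shape is taken from the diff):

def add_retry_backend(filter_properties, backend):
    retry = filter_properties.get('retry')
    if not retry:
        return
    # Append the chosen backend to both keys while the legacy 'hosts'
    # list still exists (to be dropped in a later release per the TODO).
    for key in ('hosts', 'backends'):
        tried = retry.get(key)
        if tried is not None:
            tried.append(backend)

props = {'retry': {'num_attempts': 1, 'backends': [], 'hosts': []}}
add_retry_backend(props, 'host1@lvm#pool')
# Both props['retry']['hosts'] and props['retry']['backends'] now hold
# 'host1@lvm#pool', so old and new readers of the retry info agree.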

@@ -20,13 +20,15 @@ Scheduler host filters
from cinder.scheduler import base_filter
class BaseHostFilter(base_filter.BaseFilter):
class BaseBackendFilter(base_filter.BaseFilter):
"""Base class for host filters."""
def _filter_one(self, obj, filter_properties):
"""Return True if the object passes the filter, otherwise False."""
return self.host_passes(obj, filter_properties)
# For backward compatibility with out of tree filters
passes_method = getattr(self, 'host_passes', self.backend_passes)
return passes_method(obj, filter_properties)
def host_passes(self, host_state, filter_properties):
def backend_passes(self, host_state, filter_properties):
"""Return True if the HostState passes the filter, otherwise False.
Override this in a subclass.

@@ -34,6 +36,12 @@ class BaseHostFilter(base_filter.BaseFilter):
raise NotImplementedError()
class HostFilterHandler(base_filter.BaseFilterHandler):
class BackendFilterHandler(base_filter.BaseFilterHandler):
def __init__(self, namespace):
super(HostFilterHandler, self).__init__(BaseHostFilter, namespace)
super(BackendFilterHandler, self).__init__(BaseHostFilter, namespace)
# NOTE(geguileo): For backward compatibility with external filters that
# inherit from these classes
BaseHostFilter = BaseBackendFilter
HostFilterHandler = BackendFilterHandler
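A minimal sketch of how the compatibility shim above behaves at runtime: an out-of-tree filter that still only defines host_passes() keeps working, because _filter_one() looks the legacy method up first and only falls back to backend_passes(). Class names other than BaseBackendFilter are hypothetical, and the base_filter.BaseFilter parent is omitted for brevity:

class BaseBackendFilter(object):
    def _filter_one(self, obj, filter_properties):
        # Prefer a legacy host_passes() if the subclass defines one.
        passes_method = getattr(self, 'host_passes', self.backend_passes)
        return passes_method(obj, filter_properties)

    def backend_passes(self, backend_state, filter_properties):
        raise NotImplementedError()

class LegacyOutOfTreeFilter(BaseBackendFilter):
    # Pre-rename filter that was written against the old interface.
    def host_passes(self, host_state, filter_properties):
        return True

print(LegacyOutOfTreeFilter()._filter_one(object(), {}))  # True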

@@ -20,7 +20,7 @@ from cinder.scheduler import filters
from cinder.volume import api as volume
class AffinityFilter(filters.BaseHostFilter):
class AffinityFilter(filters.BaseBackendFilter):
def __init__(self):
self.volume_api = volume.API()

@@ -36,7 +36,7 @@ class AffinityFilter(filters.BaseHostFilter):
class DifferentBackendFilter(AffinityFilter):
"""Schedule volume on a different back-end from a set of volumes."""
def host_passes(self, host_state, filter_properties):
def backend_passes(self, backend_state, filter_properties):
context = filter_properties['context']
scheduler_hints = filter_properties.get('scheduler_hints') or {}

@@ -62,7 +62,7 @@ class DifferentBackendFilter(AffinityFilter):
if affinity_uuids:
return not self._get_volumes(context, affinity_uuids,
host_state)
backend_state)
# With no different_host key
return True

@@ -70,7 +70,7 @@ class DifferentBackendFilter(AffinityFilter):
class SameBackendFilter(AffinityFilter):
"""Schedule volume on the same back-end as another volume."""
def host_passes(self, host_state, filter_properties):
def backend_passes(self, backend_state, filter_properties):
context = filter_properties['context']
scheduler_hints = filter_properties.get('scheduler_hints') or {}

@@ -95,7 +95,7 @@ class SameBackendFilter(AffinityFilter):
return False
if affinity_uuids:
return self._get_volumes(context, affinity_uuids, host_state)
return self._get_volumes(context, affinity_uuids, backend_state)
# With no same_host key
return True

@@ -16,17 +16,18 @@
from cinder.scheduler import filters
class AvailabilityZoneFilter(filters.BaseHostFilter):
"""Filters Hosts by availability zone."""
class AvailabilityZoneFilter(filters.BaseBackendFilter):
"""Filters Backends by availability zone."""
# Availability zones do not change within a request
run_filter_once_per_request = True
def host_passes(self, host_state, filter_properties):
def backend_passes(self, backend_state, filter_properties):
spec = filter_properties.get('request_spec', {})
props = spec.get('resource_properties', {})
availability_zone = props.get('availability_zone')
if availability_zone:
return availability_zone == host_state.service['availability_zone']
return (availability_zone ==
backend_state.service['availability_zone'])
return True

@@ -21,8 +21,8 @@ from cinder.scheduler.filters import extra_specs_ops
LOG = logging.getLogger(__name__)
class CapabilitiesFilter(filters.BaseHostFilter):
"""HostFilter to work with resource (instance & volume) type records."""
class CapabilitiesFilter(filters.BaseBackendFilter):
"""BackendFilter to work with resource (instance & volume) type records."""
def _satisfies_extra_specs(self, capabilities, resource_type):
"""Check if capabilities satisfy resource type requirements.

@@ -55,7 +55,7 @@ class CapabilitiesFilter(filters.BaseHostFilter):
try:
cap = cap[scope[index]]
except (TypeError, KeyError):
LOG.debug("Host doesn't provide capability '%(cap)s' " %
LOG.debug("Backend doesn't provide capability '%(cap)s' " %
{'cap': scope[index]})
return False

@@ -75,15 +75,15 @@ class CapabilitiesFilter(filters.BaseHostFilter):
return False
return True
def host_passes(self, host_state, filter_properties):
"""Return a list of hosts that can create resource_type."""
def backend_passes(self, backend_state, filter_properties):
"""Return a list of backends that can create resource_type."""
# Note(zhiteng) Currently only Cinder and Nova are using
# this filter, so the resource type is either instance or
# volume.
resource_type = filter_properties.get('resource_type')
if not self._satisfies_extra_specs(host_state.capabilities,
if not self._satisfies_extra_specs(backend_state.capabilities,
resource_type):
LOG.debug("%(host_state)s fails resource_type extra_specs "
"requirements", {'host_state': host_state})
LOG.debug("%(backend_state)s fails resource_type extra_specs "
"requirements", {'backend_state': backend_state})
return False
return True

@@ -28,22 +28,22 @@ from cinder.scheduler import filters
LOG = logging.getLogger(__name__)
class CapacityFilter(filters.BaseHostFilter):
"""CapacityFilter filters based on volume host's capacity utilization."""
class CapacityFilter(filters.BaseBackendFilter):
"""Capacity filters based on volume backend's capacity utilization."""
def host_passes(self, host_state, filter_properties):
def backend_passes(self, backend_state, filter_properties):
"""Return True if host has sufficient capacity."""
# If the volume already exists on this host, don't fail it for
# insufficient capacity (e.g., if we are retyping)
if host_state.backend_id == filter_properties.get('vol_exists_on'):
if backend_state.backend_id == filter_properties.get('vol_exists_on'):
return True
spec = filter_properties.get('request_spec')
if spec:
volid = spec.get('volume_id')
grouping = 'cluster' if host_state.cluster_name else 'host'
grouping = 'cluster' if backend_state.cluster_name else 'host'
if filter_properties.get('new_size'):
# If new_size is passed, we are allocating space to extend a volume
requested_size = (int(filter_properties.get('new_size')) -

@@ -51,25 +51,25 @@ class CapacityFilter(filters.BaseHostFilter):
LOG.debug('Checking if %(grouping)s %(grouping_name)s can extend '
'the volume %(id)s in %(size)s GB',
{'grouping': grouping,
'grouping_name': host_state.backend_id, 'id': volid,
'grouping_name': backend_state.backend_id, 'id': volid,
'size': requested_size})
else:
requested_size = filter_properties.get('size')
LOG.debug('Checking if %(grouping)s %(grouping_name)s can create '
'a %(size)s GB volume (%(id)s)',
{'grouping': grouping,
'grouping_name': host_state.backend_id, 'id': volid,
'grouping_name': backend_state.backend_id, 'id': volid,
'size': requested_size})
if host_state.free_capacity_gb is None:
if backend_state.free_capacity_gb is None:
# Fail Safe
LOG.error(_LE("Free capacity not set: "
"volume node info collection broken."))
return False
free_space = host_state.free_capacity_gb
total_space = host_state.total_capacity_gb
reserved = float(host_state.reserved_percentage) / 100
free_space = backend_state.free_capacity_gb
total_space = backend_state.total_capacity_gb
reserved = float(backend_state.reserved_percentage) / 100
if free_space in ['infinite', 'unknown']:
# NOTE(zhiteng) for those back-ends cannot report actual
# available capacity, we assume it is able to serve the

@@ -93,7 +93,7 @@ class CapacityFilter(filters.BaseHostFilter):
"%(grouping_name)s."),
{"total": total,
"grouping": grouping,
"grouping_name": host_state.backend_id})
"grouping_name": backend_state.backend_id})
return False
# Calculate how much free space is left after taking into account
# the reserved space.

@@ -114,16 +114,16 @@ class CapacityFilter(filters.BaseHostFilter):
# thin_provisioning_support is True. Check if the ratio of
# provisioned capacity over total capacity has exceeded over
# subscription ratio.
if (thin and host_state.thin_provisioning_support and
host_state.max_over_subscription_ratio >= 1):
provisioned_ratio = ((host_state.provisioned_capacity_gb +
if (thin and backend_state.thin_provisioning_support and
backend_state.max_over_subscription_ratio >= 1):
provisioned_ratio = ((backend_state.provisioned_capacity_gb +
requested_size) / total)
if provisioned_ratio > host_state.max_over_subscription_ratio:
if provisioned_ratio > backend_state.max_over_subscription_ratio:
msg_args = {
"provisioned_ratio": provisioned_ratio,
"oversub_ratio": host_state.max_over_subscription_ratio,
"oversub_ratio": backend_state.max_over_subscription_ratio,
"grouping": grouping,
"grouping_name": host_state.backend_id,
"grouping_name": backend_state.backend_id,
}
LOG.warning(_LW(
"Insufficient free space for thin provisioning. "

@@ -140,20 +140,20 @@ class CapacityFilter(filters.BaseHostFilter):
# the currently available free capacity (taking into account
# of reserved space) which we can over-subscribe.
adjusted_free_virtual = (
free * host_state.max_over_subscription_ratio)
free * backend_state.max_over_subscription_ratio)
return adjusted_free_virtual >= requested_size
elif thin and host_state.thin_provisioning_support:
elif thin and backend_state.thin_provisioning_support:
LOG.warning(_LW("Filtering out %(grouping)s %(grouping_name)s "
"with an invalid maximum over subscription ratio "
"of %(oversub_ratio).2f. The ratio should be a "
"minimum of 1.0."),
{"oversub_ratio":
host_state.max_over_subscription_ratio,
backend_state.max_over_subscription_ratio,
"grouping": grouping,
"grouping_name": host_state.backend_id})
"grouping_name": backend_state.backend_id})
return False
msg_args = {"grouping_name": host_state.backend_id,
msg_args = {"grouping_name": backend_state.backend_id,
"grouping": grouping,
"requested": requested_size,
"available": free}

@@ -24,33 +24,34 @@ from cinder.scheduler import filters
LOG = logging.getLogger(__name__)
class DriverFilter(filters.BaseHostFilter):
"""DriverFilter filters hosts based on a 'filter function' and metrics.
class DriverFilter(filters.BaseBackendFilter):
"""DriverFilter filters backend based on a 'filter function' and metrics.
DriverFilter filters based on volume host's provided 'filter function'
DriverFilter filters based on volume backend's provided 'filter function'
and metrics.
"""
def host_passes(self, host_state, filter_properties):
"""Determines whether a host has a passing filter_function or not."""
stats = self._generate_stats(host_state, filter_properties)
def backend_passes(self, backend_state, filter_properties):
"""Determines if a backend has a passing filter_function or not."""
stats = self._generate_stats(backend_state, filter_properties)
LOG.debug("Checking backend '%s'", stats['host_stats']['backend_id'])
LOG.debug("Checking backend '%s'",
stats['backend_stats']['backend_id'])
result = self._check_filter_function(stats)
LOG.debug("Result: %s", result)
LOG.debug("Done checking backend '%s'",
stats['host_stats']['backend_id'])
stats['backend_stats']['backend_id'])
return result
def _check_filter_function(self, stats):
"""Checks if a volume passes a host's filter function.
"""Checks if a volume passes a backend's filter function.
Returns a tuple in the format (filter_passing, filter_invalid).
Both values are booleans.
"""
if stats['filter_function'] is None:
LOG.debug("Filter function not set :: passing host")
LOG.debug("Filter function not set :: passing backend")
return True
try:

@@ -60,7 +61,7 @@ class DriverFilter(filters.BaseHostFilter):
# Warn the admin for now that there is an error in the
# filter function.
LOG.warning(_LW("Error in filtering function "
"'%(function)s' : '%(error)s' :: failing host"),
"'%(function)s' : '%(error)s' :: failing backend"),
{'function': stats['filter_function'],
'error': ex, })
return False

@@ -69,8 +70,8 @@ class DriverFilter(filters.BaseHostFilter):
def _run_evaluator(self, func, stats):
"""Evaluates a given function using the provided available stats."""
host_stats = stats['host_stats']
host_caps = stats['host_caps']
backend_stats = stats['backend_stats']
backend_caps = stats['backend_caps']
extra_specs = stats['extra_specs']
qos_specs = stats['qos_specs']
volume_stats = stats['volume_stats']

@@ -78,39 +79,39 @@ class DriverFilter(filters.BaseHostFilter):
result = evaluator.evaluate(
func,
extra=extra_specs,
stats=host_stats,
capabilities=host_caps,
stats=backend_stats,
capabilities=backend_caps,
volume=volume_stats,
qos=qos_specs)
return result
def _generate_stats(self, host_state, filter_properties):
"""Generates statistics from host and volume data."""
def _generate_stats(self, backend_state, filter_properties):
"""Generates statistics from backend and volume data."""
host_stats = {
'host': host_state.host,
'cluster_name': host_state.cluster_name,
'backend_id': host_state.backend_id,
'volume_backend_name': host_state.volume_backend_name,
'vendor_name': host_state.vendor_name,
'driver_version': host_state.driver_version,
'storage_protocol': host_state.storage_protocol,
'QoS_support': host_state.QoS_support,
'total_capacity_gb': host_state.total_capacity_gb,
'allocated_capacity_gb': host_state.allocated_capacity_gb,
'free_capacity_gb': host_state.free_capacity_gb,
'reserved_percentage': host_state.reserved_percentage,
'updated': host_state.updated,
backend_stats = {
'host': backend_state.host,
'cluster_name': backend_state.cluster_name,
'backend_id': backend_state.backend_id,
'volume_backend_name': backend_state.volume_backend_name,
'vendor_name': backend_state.vendor_name,
'driver_version': backend_state.driver_version,
'storage_protocol': backend_state.storage_protocol,
'QoS_support': backend_state.QoS_support,
'total_capacity_gb': backend_state.total_capacity_gb,
'allocated_capacity_gb': backend_state.allocated_capacity_gb,
'free_capacity_gb': backend_state.free_capacity_gb,
'reserved_percentage': backend_state.reserved_percentage,
'updated': backend_state.updated,
}
host_caps = host_state.capabilities
backend_caps = backend_state.capabilities
filter_function = None
if ('filter_function' in host_caps and
host_caps['filter_function'] is not None):
filter_function = six.text_type(host_caps['filter_function'])
if ('filter_function' in backend_caps and
backend_caps['filter_function'] is not None):
filter_function = six.text_type(backend_caps['filter_function'])
qos_specs = filter_properties.get('qos_specs', {})

@@ -121,8 +122,8 @@ class DriverFilter(filters.BaseHostFilter):
volume_stats = request_spec.get('volume_properties', {})
stats = {
'host_stats': host_stats,
'host_caps': host_caps,
'backend_stats': backend_stats,
'backend_caps': backend_caps,
'extra_specs': extra_specs,
'qos_specs': qos_specs,
'volume_stats': volume_stats,

@@ -20,7 +20,7 @@ from cinder.scheduler import filters
LOG = logging.getLogger(__name__)
class IgnoreAttemptedHostsFilter(filters.BaseHostFilter):
class IgnoreAttemptedHostsFilter(filters.BaseBackendFilter):
"""Filter out previously attempted hosts
A host passes this filter if it has not already been attempted for

@@ -30,13 +30,13 @@ class IgnoreAttemptedHostsFilter(filters.BaseHostFilter):
{
'retry': {
'hosts': ['host1', 'host2'],
'backends': ['backend1', 'backend2'],
'num_attempts': 3,
}
}
"""
def host_passes(self, host_state, filter_properties):
def backend_passes(self, backend_state, filter_properties):
"""Skip nodes that have already been attempted."""
attempted = filter_properties.get('retry')
if not attempted:

@@ -44,14 +44,15 @@ class IgnoreAttemptedHostsFilter(filters.BaseHostFilter):
LOG.debug("Re-scheduling is disabled.")
return True
hosts = attempted.get('hosts', [])
host = host_state.backend_id
# TODO(geguileo): In P - Just use backends
backends = attempted.get('backends', attempted.get('hosts', []))
backend = backend_state.backend_id
passes = host not in hosts
passes = backend not in backends
pass_msg = "passes" if passes else "fails"
LOG.debug("Host %(host)s %(pass_msg)s. Previously tried hosts: "
"%(hosts)s" % {'host': host,
'pass_msg': pass_msg,
'hosts': hosts})
LOG.debug("Backend %(backend)s %(pass_msg)s. Previously tried "
"backends: %(backends)s" % {'backend': backend,
'pass_msg': pass_msg,
'backends': backends})
return passes

@@ -30,7 +30,7 @@ INSTANCE_HOST_PROP = 'OS-EXT-SRV-ATTR:host'
REQUESTS_TIMEOUT = 5
class InstanceLocalityFilter(filters.BaseHostFilter):
class InstanceLocalityFilter(filters.BaseBackendFilter):
"""Schedule volume on the same host as a given instance.
This filter enables selection of a storage back-end located on the host

@@ -51,7 +51,7 @@ class InstanceLocalityFilter(filters.BaseHostFilter):
def __init__(self):
# Cache Nova API answers directly into the Filter object.
# Since a BaseHostFilter instance lives only during the volume's
# Since a BaseBackendFilter instance lives only during the volume's
# scheduling, the cache is re-created for every new volume creation.
self._cache = {}
super(InstanceLocalityFilter, self).__init__()

@@ -69,9 +69,9 @@ class InstanceLocalityFilter(filters.BaseHostFilter):
return self._nova_ext_srv_attr
def host_passes(self, backend_state, filter_properties):
def backend_passes(self, backend_state, filter_properties):
context = filter_properties['context']
host = volume_utils.extract_host(backend_state.backend_id, 'host')
backend = volume_utils.extract_host(backend_state.backend_id, 'host')
scheduler_hints = filter_properties.get('scheduler_hints') or {}
instance_uuid = scheduler_hints.get(HINT_KEYWORD, None)

@@ -93,7 +93,7 @@ class InstanceLocalityFilter(filters.BaseHostFilter):
# First, lookup for already-known information in local cache
if instance_uuid in self._cache:
return self._cache[instance_uuid] == host
return self._cache[instance_uuid] == backend
if not self._nova_has_extended_server_attributes(context):
LOG.warning(_LW('Hint "%s" dropped because '

@@ -116,5 +116,5 @@ class InstanceLocalityFilter(filters.BaseHostFilter):
self._cache[instance_uuid] = getattr(server, INSTANCE_HOST_PROP)
# Match if given instance is hosted on host
return self._cache[instance_uuid] == host
# Match if given instance is hosted on backend
return self._cache[instance_uuid] == backend

@@ -21,8 +21,8 @@ import six
from cinder.scheduler import filters
class JsonFilter(filters.BaseHostFilter):
"""Host Filter to allow simple JSON-based grammar for selecting hosts."""
class JsonFilter(filters.BaseBackendFilter):
"""Backend filter for simple JSON-based grammar for selecting backends."""
def _op_compare(self, args, op):
"""Compare first item of args with the rest using specified operator.

@@ -87,12 +87,12 @@ class JsonFilter(filters.BaseHostFilter):
'and': _and,
}
def _parse_string(self, string, host_state):
def _parse_string(self, string, backend_state):
"""Parse capability lookup strings.
Strings prefixed with $ are capability lookups in the
form '$variable' where 'variable' is an attribute in the
HostState class. If $variable is a dictionary, you may
BackendState class. If $variable is a dictionary, you may
use: $variable.dictkey
"""
if not string:

@@ -101,7 +101,7 @@ class JsonFilter(filters.BaseHostFilter):
return string
path = string[1:].split(".")
obj = getattr(host_state, path[0], None)
obj = getattr(backend_state, path[0], None)
if obj is None:
return None
for item in path[1:]:

@@ -110,7 +110,7 @@ class JsonFilter(filters.BaseHostFilter):
return None
return obj
def _process_filter(self, query, host_state):
def _process_filter(self, query, backend_state):
"""Recursively parse the query structure."""
if not query:
return True

@@ -119,16 +119,16 @@ class JsonFilter(filters.BaseHostFilter):
cooked_args = []
for arg in query[1:]:
if isinstance(arg, list):
arg = self._process_filter(arg, host_state)
arg = self._process_filter(arg, backend_state)
elif isinstance(arg, six.string_types):
arg = self._parse_string(arg, host_state)
arg = self._parse_string(arg, backend_state)
if arg is not None:
cooked_args.append(arg)
result = method(self, cooked_args)
return result
def host_passes(self, host_state, filter_properties):
"""Return a list of hosts that can fulfill query requirements."""
def backend_passes(self, backend_state, filter_properties):
"""Return a list of backends that can fulfill query requirements."""
# TODO(zhiteng) Add description for filter_properties structure
# and scheduler_hints.
try:

@@ -141,9 +141,9 @@ class JsonFilter(filters.BaseHostFilter):
# NOTE(comstud): Not checking capabilities or service for
# enabled/disabled so that a provided json filter can decide
result = self._process_filter(jsonutils.loads(query), host_state)
result = self._process_filter(jsonutils.loads(query), backend_state)
if isinstance(result, list):
# If any succeeded, include the host
# If any succeeded, include the backend
result = any(result)
if result:
# Filter it out.

@@ -124,11 +124,11 @@ class ScheduleCreateVolumeTask(flow_utils.CinderTask):
except Exception as e:
# An error happened, notify on the scheduler queue and log that
# this happened and set the volume to errored out and reraise the
# error *if* exception caught isn't NoValidHost. Otherwise *do not*
# reraise (since what's the point?)
# error *if* exception caught isn't NoValidBackend. Otherwise *do
# not* reraise (since what's the point?)
with excutils.save_and_reraise_exception(
reraise=not isinstance(e, exception.NoValidHost)):
if isinstance(e, exception.NoValidHost):
reraise=not isinstance(e, exception.NoValidBackend)):
if isinstance(e, exception.NoValidBackend):
self.message_api.create(
context,
defined_messages.UNABLE_TO_ALLOCATE,
@ -14,7 +14,7 @@
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Manage hosts in the current zone.
|
||||
Manage backends in the current zone.
|
||||
"""
|
||||
|
||||
import collections
|
||||
@ -34,6 +34,11 @@ from cinder.scheduler import filters
|
||||
from cinder.volume import utils as vol_utils
|
||||
|
||||
|
||||
# FIXME: This file should be renamed to backend_manager, we should also rename
|
||||
# HostManager class, and scheduler_host_manager option, and also the weight
|
||||
# classes, and add code to maintain backward compatibility.
|
||||
|
||||
|
||||
host_manager_opts = [
cfg.ListOpt('scheduler_default_filters',
default=[
@ -83,7 +88,7 @@ class ReadOnlyDict(collections.Mapping):
return '%s(%r)' % (self.__class__.__name__, self.data)


class HostState(object):
class BackendState(object):
"""Mutable and immutable information tracked for a volume backend."""

def __init__(self, host, cluster_name, capabilities=None, service=None):
@ -303,12 +308,11 @@ class HostState(object):
# come up with better representation of HostState.
grouping = 'cluster' if self.cluster_name else 'host'
grouping_name = self.backend_id

return ("%s '%s': free_capacity_gb: %s, pools: %s" %
(grouping, grouping_name, self.free_capacity_gb, self.pools))


class PoolState(HostState):
class PoolState(BackendState):
def __init__(self, host, cluster_name, capabilities, pool_name):
new_host = vol_utils.append_host(host, pool_name)
new_cluster = vol_utils.append_host(cluster_name, pool_name)
@ -356,7 +360,7 @@ class PoolState(HostState):
class HostManager(object):
"""Base HostManager class."""

host_state_cls = HostState
backend_state_cls = BackendState

REQUIRED_KEYS = frozenset([
'pool_name',
@ -371,20 +375,20 @@ class HostManager(object):

def __init__(self):
self.service_states = {} # { <host>: {<service>: {cap k : v}}}
self.host_state_map = {}
self.filter_handler = filters.HostFilterHandler('cinder.scheduler.'
'filters')
self.backend_state_map = {}
self.filter_handler = filters.BackendFilterHandler('cinder.scheduler.'
'filters')
self.filter_classes = self.filter_handler.get_all_classes()
self.weight_handler = importutils.import_object(
CONF.scheduler_weight_handler,
'cinder.scheduler.weights')
self.weight_classes = self.weight_handler.get_all_classes()

self._no_capabilities_hosts = set() # Hosts having no capabilities
self._update_host_state_map(cinder_context.get_admin_context())
self._no_capabilities_hosts = set() # Services without capabilities
self._update_backend_state_map(cinder_context.get_admin_context())
self.service_states_last_update = {}

def _choose_host_filters(self, filter_cls_names):
def _choose_backend_filters(self, filter_cls_names):
"""Return a list of available filter names.

This function checks input filter names against a predefined set
@ -411,7 +415,7 @@ class HostManager(object):
filter_name=", ".join(bad_filters))
return good_filters

def _choose_host_weighers(self, weight_cls_names):
def _choose_backend_weighers(self, weight_cls_names):
"""Return a list of available weigher names.

This function checks input weigher names against a predefined set
@ -439,20 +443,20 @@ class HostManager(object):
weigher_name=", ".join(bad_weighers))
return good_weighers

def get_filtered_hosts(self, hosts, filter_properties,
filter_class_names=None):
"""Filter hosts and return only ones passing all filters."""
filter_classes = self._choose_host_filters(filter_class_names)
def get_filtered_backends(self, backends, filter_properties,
filter_class_names=None):
"""Filter backends and return only ones passing all filters."""
filter_classes = self._choose_backend_filters(filter_class_names)
return self.filter_handler.get_filtered_objects(filter_classes,
hosts,
backends,
filter_properties)

def get_weighed_hosts(self, hosts, weight_properties,
weigher_class_names=None):
"""Weigh the hosts."""
weigher_classes = self._choose_host_weighers(weigher_class_names)
def get_weighed_backends(self, backends, weight_properties,
weigher_class_names=None):
"""Weigh the backends."""
weigher_classes = self._choose_backend_weighers(weigher_class_names)
return self.weight_handler.get_weighed_objects(weigher_classes,
hosts,
backends,
weight_properties)

def update_service_capabilities(self, service_name, host, capabilities,
@ -532,7 +536,7 @@ class HostManager(object):
def has_all_capabilities(self):
return len(self._no_capabilities_hosts) == 0

def _update_host_state_map(self, context):
def _update_backend_state_map(self, context):

# Get resource usage across the available volume nodes:
topic = constants.VOLUME_TOPIC
@ -555,14 +559,14 @@ class HostManager(object):

# Since the service could have been added or remove from a cluster
backend_key = service.service_topic_queue
backend_state = self.host_state_map.get(backend_key, None)
backend_state = self.backend_state_map.get(backend_key, None)
if not backend_state:
backend_state = self.host_state_cls(
backend_state = self.backend_state_cls(
host,
service.cluster_name,
capabilities=capabilities,
service=dict(service))
self.host_state_map[backend_key] = backend_state
self.backend_state_map[backend_key] = backend_state

# We may be receiving capability reports out of order from
# different volume services in a cluster, so we drop older updates
@ -577,8 +581,8 @@ class HostManager(object):

self._no_capabilities_hosts = no_capabilities_hosts

# remove non-active keys from host_state_map
inactive_backend_keys = set(self.host_state_map) - active_backends
# remove non-active keys from backend_state_map
inactive_backend_keys = set(self.backend_state_map) - active_backends
for backend_key in inactive_backend_keys:
# NOTE(geguileo): We don't want to log the removal of a host from
# the map when we are removing it because it has been added to a
@ -586,27 +590,28 @@ class HostManager(object):
if backend_key not in active_hosts:
LOG.info(_LI("Removing non-active backend: %(backend)s from "
"scheduler cache."), {'backend': backend_key})
del self.host_state_map[backend_key]
del self.backend_state_map[backend_key]

def get_all_host_states(self, context):
"""Returns a dict of all the hosts the HostManager knows about.
def get_all_backend_states(self, context):
"""Returns a dict of all the backends the HostManager knows about.

Each of the consumable resources in HostState are
Each of the consumable resources in BackendState are
populated with capabilities scheduler received from RPC.

For example:
{'192.168.1.100': HostState(), ...}
{'192.168.1.100': BackendState(), ...}
"""

self._update_host_state_map(context)
self._update_backend_state_map(context)

# build a pool_state map and return that map instead of host_state_map
# build a pool_state map and return that map instead of
# backend_state_map
all_pools = {}
for host, state in self.host_state_map.items():
for backend_key, state in self.backend_state_map.items():
for key in state.pools:
pool = state.pools[key]
# use host.pool_name to make sure key is unique
pool_key = '.'.join([host, pool.pool_name])
# use backend_key.pool_name to make sure key is unique
pool_key = '.'.join([backend_key, pool.pool_name])
all_pools[pool_key] = pool

return all_pools.values()
@ -614,14 +619,14 @@ class HostManager(object):
def get_pools(self, context):
"""Returns a dict of all pools on all hosts HostManager knows about."""

self._update_host_state_map(context)
self._update_backend_state_map(context)

all_pools = []
for host, state in self.host_state_map.items():
for backend_key, state in self.backend_state_map.items():
for key in state.pools:
pool = state.pools[key]
# use host.pool_name to make sure key is unique
pool_key = vol_utils.append_host(host, pool.pool_name)
# use backend_key.pool_name to make sure key is unique
pool_key = vol_utils.append_host(backend_key, pool.pool_name)
new_pool = dict(name=pool_key)
new_pool.update(dict(capabilities=pool.capabilities))
all_pools.append(new_pool)

@ -124,8 +124,8 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
context, group,
request_spec_list,
filter_properties_list)
except exception.NoValidHost:
LOG.error(_LE("Could not find a host for consistency group "
except exception.NoValidBackend:
LOG.error(_LE("Could not find a backend for consistency group "
"%(group_id)s."),
{'group_id': group.id})
group.status = 'error'
@ -149,8 +149,8 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
request_spec_list,
group_filter_properties,
filter_properties_list)
except exception.NoValidHost:
LOG.error(_LE("Could not find a host for group "
except exception.NoValidBackend:
LOG.error(_LE("Could not find a backend for group "
"%(group_id)s."),
{'group_id': group.id})
group.status = 'error'
@ -198,7 +198,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):

def migrate_volume(self, context, volume, backend, force_copy,
request_spec, filter_properties):
"""Ensure that the host exists and can accept the volume."""
"""Ensure that the backend exists and can accept the volume."""
self._wait_for_scheduler()

def _migrate_volume_set_error(self, context, ex, request_spec):
@ -214,10 +214,10 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
context, ex, request_spec)

try:
tgt_backend = self.driver.host_passes_filters(context, backend,
request_spec,
filter_properties)
except exception.NoValidHost as ex:
tgt_backend = self.driver.backend_passes_filters(context, backend,
request_spec,
filter_properties)
except exception.NoValidBackend as ex:
_migrate_volume_set_error(self, context, ex, request_spec)
except Exception as ex:
with excutils.save_and_reraise_exception():
@ -269,19 +269,20 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
migration_policy = 'never'

try:
tgt_host = self.driver.find_retype_host(context, request_spec,
filter_properties,
migration_policy)
tgt_backend = self.driver.find_retype_backend(context,
request_spec,
filter_properties,
migration_policy)
except Exception as ex:
# Not having a valid host is an expected exception, so we don't
# reraise on it.
reraise = not isinstance(ex, exception.NoValidHost)
reraise = not isinstance(ex, exception.NoValidBackend)
with excutils.save_and_reraise_exception(reraise=reraise):
_retype_volume_set_error(self, context, ex, request_spec,
volume, reservations)
else:
volume_rpcapi.VolumeAPI().retype(context, volume,
new_type['id'], tgt_host,
new_type['id'], tgt_backend,
migration_policy,
reservations,
old_reservations)
@ -298,11 +299,11 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
context, ex, request_spec)

try:
self.driver.host_passes_filters(context,
volume.service_topic_queue,
request_spec,
filter_properties)
except exception.NoValidHost as ex:
self.driver.backend_passes_filters(context,
volume.service_topic_queue,
request_spec,
filter_properties)
except exception.NoValidBackend as ex:
_manage_existing_set_error(self, context, ex, request_spec)
except Exception as ex:
with excutils.save_and_reraise_exception():
@ -333,12 +334,12 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):

filter_properties['new_size'] = new_size
try:
self.driver.host_passes_filters(context,
volume.service_topic_queue,
request_spec, filter_properties)
self.driver.backend_passes_filters(context,
volume.service_topic_queue,
request_spec, filter_properties)
volume_rpcapi.VolumeAPI().extend_volume(context, volume, new_size,
reservations)
except exception.NoValidHost as ex:
except exception.NoValidBackend as ex:
QUOTAS.rollback(context, reservations,
project_id=volume.project_id)
_extend_volume_set_error(self, context, ex, request_spec)

@ -413,11 +413,13 @@ class ModelsObjectComparatorMixin(object):
def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None,
msg=None):
obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
sort_key = lambda d: [d[k] for k in sorted(d)]
conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key)

self.assertListEqual(conv_and_sort(objs1), conv_and_sort(objs2),
msg=msg)
objs1 = map(obj_to_dict, objs1)
objs2 = list(map(obj_to_dict, objs2))
# We don't care about the order of the lists, as long as they are in
for obj1 in objs1:
self.assertIn(obj1, objs2)
objs2.remove(obj1)
self.assertEqual([], objs2)

def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2):
self.assertEqual(len(primitives1), len(primitives2))

@ -62,6 +62,7 @@ class BaseAdminTest(test.TestCase):
def _create_volume(self, context, updates=None):
db_volume = {'status': 'available',
'host': 'test',
'binary': 'cinder-volume',
'availability_zone': 'fake_zone',
'attach_status': fields.VolumeAttachStatus.DETACHED}
if updates:
@ -502,10 +503,12 @@ class AdminActionsTest(BaseAdminTest):
db.service_create(self.ctx,
{'host': 'test',
'topic': constants.VOLUME_TOPIC,
'binary': 'cinder-volume',
'created_at': timeutils.utcnow()})
db.service_create(self.ctx,
{'host': 'test2',
'topic': constants.VOLUME_TOPIC,
'binary': 'cinder-volume',
'created_at': timeutils.utcnow()})
db.service_create(self.ctx,
{'host': 'clustered_host',

@ -36,6 +36,7 @@ def _get_filters_sentinel():
'race_preventer': mock.sentinel.race_preventer,
'last_heartbeat': mock.sentinel.last_heartbeat,
'num_hosts': mock.sentinel.num_hosts,
'name_match_level': mock.sentinel.name_match_level,
'num_down_hosts': mock.sentinel.num_down_hosts}


@ -90,9 +90,9 @@ class FakeHostManager(host_manager.HostManager):
}


class FakeHostState(host_manager.HostState):
class FakeBackendState(host_manager.BackendState):
def __init__(self, host, attribute_dict):
super(FakeHostState, self).__init__(host, None)
super(FakeBackendState, self).__init__(host, None)
for (key, val) in attribute_dict.items():
setattr(self, key, val)

@ -42,11 +42,11 @@ class AllocatedCapacityWeigherTestCase(test.TestCase):
weight_properties)[0]

@mock.patch('cinder.db.sqlalchemy.api.service_get_all')
def _get_all_hosts(self, _mock_service_get_all, disabled=False):
def _get_all_backends(self, _mock_service_get_all, disabled=False):
ctxt = context.get_admin_context()
fakes.mock_host_manager_db_calls(_mock_service_get_all,
disabled=disabled)
host_states = self.host_manager.get_all_host_states(ctxt)
host_states = self.host_manager.get_all_backend_states(ctxt)
_mock_service_get_all.assert_called_once_with(
ctxt,
None, # backend_match_level
@ -54,7 +54,7 @@ class AllocatedCapacityWeigherTestCase(test.TestCase):
return host_states

def test_default_of_spreading_first(self):
hostinfo_list = self._get_all_hosts()
hostinfo_list = self._get_all_backends()

# host1: allocated_capacity_gb=0, weight=0 Norm=0.0
# host2: allocated_capacity_gb=1748, weight=-1748
@ -70,7 +70,7 @@ class AllocatedCapacityWeigherTestCase(test.TestCase):

def test_capacity_weight_multiplier1(self):
self.flags(allocated_capacity_weight_multiplier=1.0)
hostinfo_list = self._get_all_hosts()
hostinfo_list = self._get_all_backends()

# host1: allocated_capacity_gb=0, weight=0 Norm=0.0
# host2: allocated_capacity_gb=1748, weight=1748
@ -86,7 +86,7 @@ class AllocatedCapacityWeigherTestCase(test.TestCase):

def test_capacity_weight_multiplier2(self):
self.flags(allocated_capacity_weight_multiplier=-2.0)
hostinfo_list = self._get_all_hosts()
hostinfo_list = self._get_all_backends()

# host1: allocated_capacity_gb=0, weight=0 Norm=0.0
# host2: allocated_capacity_gb=1748, weight=-3496

@ -178,8 +178,8 @@ class TestBaseFilterHandler(test.TestCase):
def test_get_filtered_objects_info_and_debug_log_none_returned(self):

all_filters = [FilterA, FilterA, FilterB]
fake_hosts = [host_manager.HostState('fake_host%s' % x, None)
for x in range(1, 4)]
fake_backends = [host_manager.BackendState('fake_be%s' % x, None)
for x in range(1, 4)]

filt_props = {"request_spec": {'volume_id': fake.VOLUME_ID,
'volume_properties': {'project_id': fake.PROJECT_ID,
@ -187,7 +187,7 @@ class TestBaseFilterHandler(test.TestCase):
'host': 'host4'}}}
with mock.patch.object(base_filter, 'LOG') as mock_log:
result = self.handler.get_filtered_objects(
all_filters, fake_hosts, filt_props)
all_filters, fake_backends, filt_props)
self.assertFalse(result)
msg = "with volume ID '%s'" % fake.VOLUME_ID
# FilterA should leave Host1 and Host2; FilterB should leave None.
@ -197,8 +197,8 @@ class TestBaseFilterHandler(test.TestCase):
self.assertIn(msg, cargs)
self.assertIn(exp_output, cargs)

exp_output = ("[('FilterA', ['fake_host2', 'fake_host3']), "
"('FilterA', ['fake_host3']), "
exp_output = ("[('FilterA', ['fake_be2', 'fake_be3']), "
"('FilterA', ['fake_be3']), "
+ "('FilterB', None)]")
cargs = mock_log.debug.call_args[0][0]
self.assertIn(msg, cargs)

@ -45,16 +45,16 @@ class CapacityWeigherTestCase(test.TestCase):
weight_properties)

@mock.patch('cinder.db.sqlalchemy.api.service_get_all')
def _get_all_hosts(self, _mock_service_get_all, disabled=False):
def _get_all_backends(self, _mock_service_get_all, disabled=False):
ctxt = context.get_admin_context()
fakes.mock_host_manager_db_calls(_mock_service_get_all,
disabled=disabled)
host_states = self.host_manager.get_all_host_states(ctxt)
backend_states = self.host_manager.get_all_backend_states(ctxt)
_mock_service_get_all.assert_called_once_with(
ctxt,
None, # backend_match_level
topic=constants.VOLUME_TOPIC, disabled=disabled)
return host_states
return backend_states

# If thin and thin_provisioning_support are True,
# use the following formula:
@ -78,7 +78,7 @@ class CapacityWeigherTestCase(test.TestCase):
)
@ddt.unpack
def test_default_of_spreading_first(self, volume_type, winner):
hostinfo_list = self._get_all_hosts()
backend_info_list = self._get_all_backends()

# Results for the 1st test
# {'provisioning:type': 'thin'}:
@ -106,7 +106,7 @@ class CapacityWeigherTestCase(test.TestCase):
'volume_type': volume_type,
}
weighed_host = self._get_weighed_hosts(
hostinfo_list,
backend_info_list,
weight_properties=weight_properties)[0]
self.assertEqual(1.0, weighed_host.weight)
self.assertEqual(winner, utils.extract_host(weighed_host.obj.host))
@ -126,7 +126,7 @@ class CapacityWeigherTestCase(test.TestCase):
@ddt.unpack
def test_capacity_weight_multiplier1(self, volume_type, winner):
self.flags(capacity_weight_multiplier=-1.0)
hostinfo_list = self._get_all_hosts()
backend_info_list = self._get_all_backends()

# Results for the 1st test
# {'provisioning:type': 'thin'}:
@ -154,7 +154,7 @@ class CapacityWeigherTestCase(test.TestCase):
'volume_type': volume_type,
}
weighed_host = self._get_weighed_hosts(
hostinfo_list,
backend_info_list,
weight_properties=weight_properties)[0]
self.assertEqual(0.0, weighed_host.weight)
self.assertEqual(winner, utils.extract_host(weighed_host.obj.host))
@ -174,7 +174,7 @@ class CapacityWeigherTestCase(test.TestCase):
@ddt.unpack
def test_capacity_weight_multiplier2(self, volume_type, winner):
self.flags(capacity_weight_multiplier=2.0)
hostinfo_list = self._get_all_hosts()
backend_info_list = self._get_all_backends()

# Results for the 1st test
# {'provisioning:type': 'thin'}:
@ -202,7 +202,7 @@ class CapacityWeigherTestCase(test.TestCase):
'volume_type': volume_type,
}
weighed_host = self._get_weighed_hosts(
hostinfo_list,
backend_info_list,
weight_properties=weight_properties)[0]
self.assertEqual(1.0 * 2, weighed_host.weight)
self.assertEqual(winner, utils.extract_host(weighed_host.obj.host))
@ -210,7 +210,7 @@ class CapacityWeigherTestCase(test.TestCase):
def test_capacity_weight_no_unknown_or_infinite(self):
self.flags(capacity_weight_multiplier=-1.0)
del self.host_manager.service_states['host5']
hostinfo_list = self._get_all_hosts()
backend_info_list = self._get_all_backends()

# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
@ -229,7 +229,7 @@ class CapacityWeigherTestCase(test.TestCase):
# Norm=0.0

# so, host4 should win:
weighed_hosts = self._get_weighed_hosts(hostinfo_list)
weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4', utils.extract_host(best_host.obj.host))
@ -250,7 +250,7 @@ class CapacityWeigherTestCase(test.TestCase):
'thick_provisioning_support': False,
'reserved_percentage': 5,
'timestamp': datetime.utcnow()}
hostinfo_list = self._get_all_hosts()
backend_info_list = self._get_all_backends()

# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
@ -271,7 +271,7 @@ class CapacityWeigherTestCase(test.TestCase):
# Norm=-1.0

# so, host4 should win:
weighed_hosts = self._get_weighed_hosts(hostinfo_list)
weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4', utils.extract_host(best_host.obj.host))
@ -292,7 +292,7 @@ class CapacityWeigherTestCase(test.TestCase):
'thick_provisioning_support': False,
'reserved_percentage': 5,
'timestamp': datetime.utcnow()}
hostinfo_list = self._get_all_hosts()
backend_info_list = self._get_all_backends()

# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
@ -313,7 +313,7 @@ class CapacityWeigherTestCase(test.TestCase):
# Norm=-1.0

# so, host4 should win:
weighed_hosts = self._get_weighed_hosts(hostinfo_list)
weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4', utils.extract_host(best_host.obj.host))
@ -334,7 +334,7 @@ class CapacityWeigherTestCase(test.TestCase):
'thick_provisioning_support': False,
'reserved_percentage': 5,
'timestamp': datetime.utcnow()}
hostinfo_list = self._get_all_hosts()
backend_info_list = self._get_all_backends()

# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
@ -355,7 +355,7 @@ class CapacityWeigherTestCase(test.TestCase):
# Norm=-1.0

# so, host4 should win:
weighed_hosts = self._get_weighed_hosts(hostinfo_list)
weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4', utils.extract_host(best_host.obj.host))
@ -376,7 +376,7 @@ class CapacityWeigherTestCase(test.TestCase):
'thick_provisioning_support': False,
'reserved_percentage': 5,
'timestamp': datetime.utcnow()}
hostinfo_list = self._get_all_hosts()
backend_info_list = self._get_all_backends()

# host1: thin_provisioning_support = False
# free_capacity_gb=1024,
@ -397,7 +397,7 @@ class CapacityWeigherTestCase(test.TestCase):
# Norm=-1.0

# so, host4 should win:
weighed_hosts = self._get_weighed_hosts(hostinfo_list)
weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4', utils.extract_host(best_host.obj.host))

@ -50,7 +50,7 @@ class ChanceWeigherTestCase(test.TestCase):
# ensure HostManager can load the ChanceWeigher
# via the entry points mechanism
hm = host_manager.HostManager()
weighers = hm._choose_host_weighers('ChanceWeigher')
weighers = hm._choose_backend_weighers('ChanceWeigher')
self.assertEqual(1, len(weighers))
self.assertEqual(weighers[0], chance.ChanceWeigher)

@ -58,7 +58,8 @@ class ChanceWeigherTestCase(test.TestCase):
# ensure we don't lose any hosts when weighing with
# the ChanceWeigher
hm = host_manager.HostManager()
fake_hosts = [host_manager.HostState('fake_host%s' % x, None)
for x in range(1, 5)]
weighed_hosts = hm.get_weighed_hosts(fake_hosts, {}, 'ChanceWeigher')
self.assertEqual(4, len(weighed_hosts))
fake_backends = [host_manager.BackendState('fake_be%s' % x, None)
for x in range(1, 5)]
weighed_backends = hm.get_weighed_backends(fake_backends, {},
'ChanceWeigher')
self.assertEqual(4, len(weighed_backends))

@ -35,7 +35,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
driver_cls = filter_scheduler.FilterScheduler

def test_create_group_no_hosts(self):
# Ensure empty hosts result in NoValidHosts exception.
# Ensure empty hosts result in NoValidBackend exception.
sched = fakes.FakeFilterScheduler()

fake_context = context.RequestContext('user', 'project')
@ -51,7 +51,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
group_spec = {'group_type': {'name': 'GrpType'},
'volume_properties': {'project_id': 1,
'size': 0}}
self.assertRaises(exception.NoValidHost,
self.assertRaises(exception.NoValidBackend,
sched.schedule_create_group,
fake_context, 'faki-id1', group_spec,
request_spec_list, {}, [])
@ -87,7 +87,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertTrue(_mock_service_get_all.called)

def test_create_consistencygroup_no_hosts(self):
# Ensure empty hosts result in NoValidHosts exception.
# Ensure empty hosts result in NoValidBackend exception.
sched = fakes.FakeFilterScheduler()

fake_context = context.RequestContext('user', 'project')
@ -100,7 +100,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'volume_type': {'name': 'Type2',
'extra_specs': {}}}
request_spec_list = [request_spec, request_spec2]
self.assertRaises(exception.NoValidHost,
self.assertRaises(exception.NoValidBackend,
sched.schedule_create_consistencygroup,
fake_context, 'faki-id1', request_spec_list, {})

@ -161,7 +161,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertTrue(_mock_service_get_all.called)

def test_create_volume_no_hosts(self):
# Ensure empty hosts/child_zones result in NoValidHosts exception.
# Ensure empty hosts/child_zones result in NoValidBackend exception.
sched = fakes.FakeFilterScheduler()

fake_context = context.RequestContext('user', 'project')
@ -170,8 +170,9 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'volume_type': {'name': 'LVM_iSCSI'},
'volume_id': fake.VOLUME_ID}
request_spec = objects.RequestSpec.from_primitives(request_spec)
self.assertRaises(exception.NoValidHost, sched.schedule_create_volume,
fake_context, request_spec, {})
self.assertRaises(exception.NoValidBackend,
sched.schedule_create_volume, fake_context,
request_spec, {})

def test_create_volume_no_hosts_invalid_req(self):
sched = fakes.FakeFilterScheduler()
@ -183,7 +184,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'size': 1},
'volume_type': {'name': 'LVM_iSCSI'}}
request_spec = objects.RequestSpec.from_primitives(request_spec)
self.assertRaises(exception.NoValidHost,
self.assertRaises(exception.NoValidBackend,
sched.schedule_create_volume,
fake_context,
request_spec,
@ -199,15 +200,15 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'size': 1},
'volume_id': fake.VOLUME_ID}
request_spec = objects.RequestSpec.from_primitives(request_spec)
self.assertRaises(exception.NoValidHost,
self.assertRaises(exception.NoValidBackend,
sched.schedule_create_volume,
fake_context,
request_spec,
{})

@mock.patch('cinder.scheduler.host_manager.HostManager.'
'get_all_host_states')
def test_create_volume_non_admin(self, _mock_get_all_host_states):
'get_all_backend_states')
def test_create_volume_non_admin(self, _mock_get_all_backend_states):
# Test creating a volume locally using create_volume, passing
# a non-admin context. DB actions should work.
self.was_admin = False
@ -219,7 +220,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
return {}

sched = fakes.FakeFilterScheduler()
_mock_get_all_host_states.side_effect = fake_get
_mock_get_all_backend_states.side_effect = fake_get

fake_context = context.RequestContext('user', 'project')

@ -228,8 +229,9 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'volume_type': {'name': 'LVM_iSCSI'},
'volume_id': fake.VOLUME_ID}
request_spec = objects.RequestSpec.from_primitives(request_spec)
self.assertRaises(exception.NoValidHost, sched.schedule_create_volume,
fake_context, request_spec, {})
self.assertRaises(exception.NoValidBackend,
sched.schedule_create_volume, fake_context,
request_spec, {})
self.assertTrue(self.was_admin)

@mock.patch('cinder.db.service_get_all')
@ -393,38 +395,38 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
retry = dict(num_attempts=2)
filter_properties = dict(retry=retry)

self.assertRaises(exception.NoValidHost, sched._schedule, self.context,
request_spec, filter_properties=filter_properties)
self.assertRaises(exception.NoValidBackend, sched._schedule,
self.context, request_spec,
filter_properties=filter_properties)

def test_add_retry_host(self):
retry = dict(num_attempts=1, hosts=[])
def test_add_retry_backend(self):
retry = dict(num_attempts=1, backends=[])
filter_properties = dict(retry=retry)
host = "fakehost"
backend = "fakehost"

sched = fakes.FakeFilterScheduler()
sched._add_retry_host(filter_properties, host)
sched._add_retry_backend(filter_properties, backend)

hosts = filter_properties['retry']['hosts']
self.assertEqual(1, len(hosts))
self.assertEqual(host, hosts[0])
backends = filter_properties['retry']['backends']
self.assertListEqual([backend], backends)

def test_post_select_populate(self):
# Test addition of certain filter props after a node is selected.
retry = {'hosts': [], 'num_attempts': 1}
retry = {'backends': [], 'num_attempts': 1}
filter_properties = {'retry': retry}
sched = fakes.FakeFilterScheduler()

host_state = host_manager.HostState('host', None)
host_state.total_capacity_gb = 1024
backend_state = host_manager.BackendState('host', None)
backend_state.total_capacity_gb = 1024
sched._post_select_populate_filter_properties(filter_properties,
host_state)
backend_state)

self.assertEqual('host',
filter_properties['retry']['hosts'][0])
filter_properties['retry']['backends'][0])

self.assertEqual(1024, host_state.total_capacity_gb)
self.assertEqual(1024, backend_state.total_capacity_gb)

def _host_passes_filters_setup(self, mock_obj):
def _backend_passes_filters_setup(self, mock_obj):
sched = fakes.FakeFilterScheduler()
sched.host_manager = fakes.FakeHostManager()
fake_context = context.RequestContext('user', 'project',
@ -435,48 +437,48 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
return (sched, fake_context)

@mock.patch('cinder.db.service_get_all')
def test_host_passes_filters_happy_day(self, _mock_service_get_topic):
"""Do a successful pass through of with host_passes_filters()."""
sched, ctx = self._host_passes_filters_setup(
def test_backend_passes_filters_happy_day(self, _mock_service_get_topic):
"""Do a successful pass through of with backend_passes_filters()."""
sched, ctx = self._backend_passes_filters_setup(
_mock_service_get_topic)
request_spec = {'volume_id': fake.VOLUME_ID,
'volume_type': {'name': 'LVM_iSCSI'},
'volume_properties': {'project_id': 1,
'size': 1}}
request_spec = objects.RequestSpec.from_primitives(request_spec)
ret_host = sched.host_passes_filters(ctx, 'host1#lvm1',
request_spec, {})
ret_host = sched.backend_passes_filters(ctx, 'host1#lvm1',
request_spec, {})
self.assertEqual('host1', utils.extract_host(ret_host.host))
self.assertTrue(_mock_service_get_topic.called)

@mock.patch('cinder.db.service_get_all')
def test_host_passes_filters_default_pool_happy_day(
def test_backend_passes_filters_default_pool_happy_day(
self, _mock_service_get_topic):
"""Do a successful pass through of with host_passes_filters()."""
sched, ctx = self._host_passes_filters_setup(
"""Do a successful pass through of with backend_passes_filters()."""
sched, ctx = self._backend_passes_filters_setup(
_mock_service_get_topic)
request_spec = {'volume_id': fake.VOLUME_ID,
'volume_type': {'name': 'LVM_iSCSI'},
'volume_properties': {'project_id': 1,
'size': 1}}
request_spec = objects.RequestSpec.from_primitives(request_spec)
ret_host = sched.host_passes_filters(ctx, 'host5#_pool0',
request_spec, {})
ret_host = sched.backend_passes_filters(ctx, 'host5#_pool0',
request_spec, {})
self.assertEqual('host5', utils.extract_host(ret_host.host))
self.assertTrue(_mock_service_get_topic.called)

@mock.patch('cinder.db.service_get_all')
def test_host_passes_filters_no_capacity(self, _mock_service_get_topic):
def test_backend_passes_filters_no_capacity(self, _mock_service_get_topic):
"""Fail the host due to insufficient capacity."""
sched, ctx = self._host_passes_filters_setup(
sched, ctx = self._backend_passes_filters_setup(
_mock_service_get_topic)
request_spec = {'volume_id': fake.VOLUME_ID,
'volume_type': {'name': 'LVM_iSCSI'},
'volume_properties': {'project_id': 1,
'size': 1024}}
request_spec = objects.RequestSpec.from_primitives(request_spec)
self.assertRaises(exception.NoValidHost,
sched.host_passes_filters,
self.assertRaises(exception.NoValidBackend,
sched.backend_passes_filters,
ctx, 'host1#lvm1', request_spec, {})
self.assertTrue(_mock_service_get_topic.called)

@ -486,7 +488,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
# policy=never. host4 doesn't have enough space to hold an additional
# 200GB, but it is already the host of this volume and should not be
# counted twice.
sched, ctx = self._host_passes_filters_setup(
sched, ctx = self._backend_passes_filters_setup(
_mock_service_get_topic)
extra_specs = {'volume_backend_name': 'lvm4'}
request_spec = {'volume_id': fake.VOLUME_ID,
@ -496,9 +498,9 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'size': 200,
'host': 'host4#lvm4'}}
request_spec = objects.RequestSpec.from_primitives(request_spec)
host_state = sched.find_retype_host(ctx, request_spec,
filter_properties={},
migration_policy='never')
host_state = sched.find_retype_backend(ctx, request_spec,
filter_properties={},
migration_policy='never')
self.assertEqual('host4', utils.extract_host(host_state.host))

@mock.patch('cinder.db.service_get_all')
@ -508,7 +510,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
# policy=never. host4 doesn't have enough space to hold an additional
# 200GB, but it is already the host of this volume and should not be
# counted twice.
sched, ctx = self._host_passes_filters_setup(
sched, ctx = self._backend_passes_filters_setup(
_mock_service_get_topic)
extra_specs = {'volume_backend_name': 'lvm3'}
request_spec = {'volume_id': fake.VOLUME_ID,
@ -518,16 +520,16 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'size': 200,
'host': 'host3#lvm3'}}
request_spec = objects.RequestSpec.from_primitives(request_spec)
host_state = sched.find_retype_host(ctx, request_spec,
filter_properties={},
migration_policy='never')
host_state = sched.find_retype_backend(ctx, request_spec,
filter_properties={},
migration_policy='never')
self.assertEqual('host3#lvm3', host_state.host)

@mock.patch('cinder.db.service_get_all')
def test_retype_policy_never_migrate_fail(self, _mock_service_get_topic):
# Retype should fail if current host doesn't pass filters and
# policy=never.
sched, ctx = self._host_passes_filters_setup(
sched, ctx = self._backend_passes_filters_setup(
_mock_service_get_topic)
extra_specs = {'volume_backend_name': 'lvm1'}
request_spec = {'volume_id': fake.VOLUME_ID,
@ -537,15 +539,15 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'size': 200,
'host': 'host4'}}
request_spec = objects.RequestSpec.from_primitives(request_spec)
self.assertRaises(exception.NoValidHost, sched.find_retype_host, ctx,
request_spec, filter_properties={},
self.assertRaises(exception.NoValidBackend, sched.find_retype_backend,
ctx, request_spec, filter_properties={},
migration_policy='never')

@mock.patch('cinder.db.service_get_all')
def test_retype_policy_demand_migrate_pass(self, _mock_service_get_topic):
# Retype should pass if current host fails filters but another host
# is suitable when policy=on-demand.
sched, ctx = self._host_passes_filters_setup(
sched, ctx = self._backend_passes_filters_setup(
_mock_service_get_topic)
extra_specs = {'volume_backend_name': 'lvm1'}
request_spec = {'volume_id': fake.VOLUME_ID,
@ -555,16 +557,16 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'size': 200,
'host': 'host4'}}
request_spec = objects.RequestSpec.from_primitives(request_spec)
host_state = sched.find_retype_host(ctx, request_spec,
filter_properties={},
migration_policy='on-demand')
host_state = sched.find_retype_backend(ctx, request_spec,
filter_properties={},
migration_policy='on-demand')
self.assertEqual('host1', utils.extract_host(host_state.host))

@mock.patch('cinder.db.service_get_all')
def test_retype_policy_demand_migrate_fail(self, _mock_service_get_topic):
# Retype should fail if current host doesn't pass filters and
# no other suitable candidates exist even if policy=on-demand.
sched, ctx = self._host_passes_filters_setup(
sched, ctx = self._backend_passes_filters_setup(
_mock_service_get_topic)
extra_specs = {'volume_backend_name': 'lvm1'}
request_spec = {'volume_id': fake.VOLUME_ID,
@ -574,6 +576,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'size': 2048,
'host': 'host4'}}
request_spec = objects.RequestSpec.from_primitives(request_spec)
self.assertRaises(exception.NoValidHost, sched.find_retype_host, ctx,
request_spec, filter_properties={},
self.assertRaises(exception.NoValidBackend, sched.find_retype_backend,
ctx, request_spec, filter_properties={},
migration_policy='on-demand')

@ -23,7 +23,7 @@ from cinder.tests.unit.scheduler import fakes
class GoodnessWeigherTestCase(test.TestCase):
def test_goodness_weigher_with_no_goodness_function(self):
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com',
'capabilities': {
'foo': '50'
@ -36,19 +36,19 @@ class GoodnessWeigherTestCase(test.TestCase):

def test_goodness_weigher_passing_host(self):
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com',
'capabilities': {
'goodness_function': '100'
}
})
host_state_2 = fakes.FakeHostState('host2', {
host_state_2 = fakes.FakeBackendState('host2', {
'host': 'host2.example.com',
'capabilities': {
'goodness_function': '0'
}
})
host_state_3 = fakes.FakeHostState('host3', {
host_state_3 = fakes.FakeBackendState('host3', {
'host': 'host3.example.com',
'capabilities': {
'goodness_function': '100 / 2'
@ -65,7 +65,7 @@ class GoodnessWeigherTestCase(test.TestCase):

def test_goodness_weigher_capabilities_substitution(self):
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com',
'capabilities': {
'foo': 50,
@ -79,7 +79,7 @@ class GoodnessWeigherTestCase(test.TestCase):

def test_goodness_weigher_extra_specs_substitution(self):
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com',
'capabilities': {
'goodness_function': '10 + extra.foo'
@ -98,7 +98,7 @@ class GoodnessWeigherTestCase(test.TestCase):

def test_goodness_weigher_volume_substitution(self):
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com',
'capabilities': {
'goodness_function': '10 + volume.foo'
@ -117,7 +117,7 @@ class GoodnessWeigherTestCase(test.TestCase):

def test_goodness_weigher_qos_substitution(self):
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com',
'capabilities': {
'goodness_function': '10 + qos.foo'
@ -134,7 +134,7 @@ class GoodnessWeigherTestCase(test.TestCase):

def test_goodness_weigher_stats_substitution(self):
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com',
'capabilities': {
'goodness_function': 'stats.free_capacity_gb > 20'
@ -148,7 +148,7 @@ class GoodnessWeigherTestCase(test.TestCase):

def test_goodness_weigher_invalid_substitution(self):
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com',
'capabilities': {
'goodness_function': '10 + stats.my_val'
@ -162,13 +162,13 @@ class GoodnessWeigherTestCase(test.TestCase):

def test_goodness_weigher_host_rating_out_of_bounds(self):
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com',
'capabilities': {
'goodness_function': '-10'
}
})
host_state_2 = fakes.FakeHostState('host2', {
host_state_2 = fakes.FakeBackendState('host2', {
'host': 'host2.example.com',
'capabilities': {
'goodness_function': '200'
@ -183,7 +183,7 @@ class GoodnessWeigherTestCase(test.TestCase):

def test_goodness_weigher_invalid_goodness_function(self):
weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com',
'capabilities': {
'goodness_function': '50 / 0'

File diff suppressed because it is too large
@ -34,13 +34,13 @@ from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit.objects import test_service


class FakeFilterClass1(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
class FakeFilterClass1(filters.BaseBackendFilter):
def backend_passes(self, host_state, filter_properties):
pass


class FakeFilterClass2(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
class FakeFilterClass2(filters.BaseBackendFilter):
def backend_passes(self, host_state, filter_properties):
pass


@ -50,46 +50,46 @@ class HostManagerTestCase(test.TestCase):
def setUp(self):
super(HostManagerTestCase, self).setUp()
self.host_manager = host_manager.HostManager()
self.fake_hosts = [host_manager.HostState('fake_host%s' % x, None)
for x in range(1, 5)]
self.fake_backends = [host_manager.BackendState('fake_be%s' % x, None)
for x in range(1, 5)]
# For a second scheduler service.
self.host_manager_1 = host_manager.HostManager()

def test_choose_host_filters_not_found(self):
def test_choose_backend_filters_not_found(self):
self.flags(scheduler_default_filters='FakeFilterClass3')
self.host_manager.filter_classes = [FakeFilterClass1,
FakeFilterClass2]
self.assertRaises(exception.SchedulerHostFilterNotFound,
self.host_manager._choose_host_filters, None)
self.host_manager._choose_backend_filters, None)

def test_choose_host_filters(self):
def test_choose_backend_filters(self):
self.flags(scheduler_default_filters=['FakeFilterClass2'])
self.host_manager.filter_classes = [FakeFilterClass1,
FakeFilterClass2]

# Test 'volume' returns 1 correct function
filter_classes = self.host_manager._choose_host_filters(None)
filter_classes = self.host_manager._choose_backend_filters(None)
self.assertEqual(1, len(filter_classes))
self.assertEqual('FakeFilterClass2', filter_classes[0].__name__)

@mock.patch('cinder.scheduler.host_manager.HostManager.'
'_choose_host_filters')
def test_get_filtered_hosts(self, _mock_choose_host_filters):
'_choose_backend_filters')
def test_get_filtered_backends(self, _mock_choose_backend_filters):
filter_class = FakeFilterClass1
mock_func = mock.Mock()
mock_func.return_value = True
filter_class._filter_one = mock_func
_mock_choose_host_filters.return_value = [filter_class]
_mock_choose_backend_filters.return_value = [filter_class]

fake_properties = {'moo': 1, 'cow': 2}
expected = []
for fake_host in self.fake_hosts:
expected.append(mock.call(fake_host, fake_properties))
for fake_backend in self.fake_backends:
expected.append(mock.call(fake_backend, fake_properties))

result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
result = self.host_manager.get_filtered_backends(self.fake_backends,
fake_properties)
self.assertEqual(expected, mock_func.call_args_list)
self.assertEqual(set(self.fake_hosts), set(result))
self.assertEqual(set(self.fake_backends), set(result))

@mock.patch('cinder.scheduler.host_manager.HostManager._get_updated_pools')
@mock.patch('oslo_utils.timeutils.utcnow')
@ -563,6 +563,10 @@ class HostManagerTestCase(test.TestCase):
self.assertEqual(1, len(res))
self.assertEqual(dates[1], res[0]['capabilities']['timestamp'])

# Now we simulate old service that doesn't send timestamp
del mocked_service_states['host1']['timestamp']
with mock.patch.dict(self.host_manager.service_states,
mocked_service_states):
self.host_manager.update_service_capabilities(service_name,
'host1',
host_volume_capabs,
@ -572,8 +576,8 @@ class HostManagerTestCase(test.TestCase):
self.assertEqual(dates[2], res[0]['capabilities']['timestamp'])

@mock.patch('cinder.objects.Service.is_up', True)
def test_get_all_host_states_cluster(self):
"""Test get_all_host_states when we have clustered services.
def test_get_all_backend_states_cluster(self):
"""Test get_all_backend_states when we have clustered services.

Confirm that clustered services are grouped and that only the latest
of the capability reports is relevant.
@ -631,7 +635,7 @@ class HostManagerTestCase(test.TestCase):
'volume', services[i].host, capabilities[i][1],
services[i].cluster_name, capabilities[i][0])

res = self.host_manager.get_all_host_states(ctxt)
res = self.host_manager.get_all_backend_states(ctxt)
result = {(s.cluster_name or s.host, s.free_capacity_gb) for s in res}
expected = {(cluster_name + '#_pool0', 2000),
('non_clustered_host#_pool0', 4000)}
@ -640,8 +644,8 @@ class HostManagerTestCase(test.TestCase):
@mock.patch('cinder.db.service_get_all')
@mock.patch('cinder.objects.service.Service.is_up',
new_callable=mock.PropertyMock)
def test_get_all_host_states(self, _mock_service_is_up,
_mock_service_get_all):
def test_get_all_backend_states(self, _mock_service_is_up,
_mock_service_get_all):
context = 'fake_context'
timestamp = datetime.utcnow()
topic = constants.VOLUME_TOPIC
@ -695,7 +699,7 @@ class HostManagerTestCase(test.TestCase):
host_manager.LOG.warning = _mock_warning

# Get all states
self.host_manager.get_all_host_states(context)
self.host_manager.get_all_backend_states(context)
_mock_service_get_all.assert_called_with(context,
disabled=False,
topic=topic)
@ -704,14 +708,14 @@ class HostManagerTestCase(test.TestCase):
expected = [mock.call() for s in service_objs]
self.assertEqual(expected, _mock_service_is_up.call_args_list)

# Get host_state_map and make sure we have the first 3 hosts
host_state_map = self.host_manager.host_state_map
self.assertEqual(3, len(host_state_map))
# Get backend_state_map and make sure we have the first 3 hosts
backend_state_map = self.host_manager.backend_state_map
self.assertEqual(3, len(backend_state_map))
for i in range(3):
volume_node = services[i]
host = volume_node['host']
test_service.TestService._compare(self, volume_node,
host_state_map[host].service)
backend_state_map[host].service)

# Second test: Now service.is_up returns False for host3
_mock_service_is_up.reset_mock()
@ -720,7 +724,7 @@ class HostManagerTestCase(test.TestCase):
_mock_warning.reset_mock()

# Get all states, make sure host 3 is reported as down
self.host_manager.get_all_host_states(context)
self.host_manager.get_all_backend_states(context)
_mock_service_get_all.assert_called_with(context,
disabled=False,
topic=topic)
@ -728,15 +732,15 @@ class HostManagerTestCase(test.TestCase):
self.assertEqual(expected, _mock_service_is_up.call_args_list)
self.assertGreater(_mock_warning.call_count, 0)

# Get host_state_map and make sure we have the first 2 hosts (host3 is
# down, host4 is missing capabilities)
host_state_map = self.host_manager.host_state_map
self.assertEqual(2, len(host_state_map))
# Get backend_state_map and make sure we have the first 2 hosts (host3
# is down, host4 is missing capabilities)
backend_state_map = self.host_manager.backend_state_map
self.assertEqual(2, len(backend_state_map))
for i in range(2):
volume_node = services[i]
host = volume_node['host']
test_service.TestService._compare(self, volume_node,
host_state_map[host].service)
backend_state_map[host].service)

@mock.patch('cinder.db.service_get_all')
@mock.patch('cinder.objects.service.Service.is_up',
@ -963,12 +967,12 @@ class HostManagerTestCase(test.TestCase):
sorted(res2, key=sort_func))


class HostStateTestCase(test.TestCase):
"""Test case for HostState class."""
class BackendStateTestCase(test.TestCase):
"""Test case for BackendState class."""

def test_update_from_volume_capability_nopool(self):
fake_host = host_manager.HostState('host1', None)
self.assertIsNone(fake_host.free_capacity_gb)
fake_backend = host_manager.BackendState('be1', None)
self.assertIsNone(fake_backend.free_capacity_gb)

volume_capability = {'total_capacity_gb': 1024,
'free_capacity_gb': 512,
@ -976,34 +980,34 @@ class HostStateTestCase(test.TestCase):
'reserved_percentage': 0,
'timestamp': None}

fake_host.update_from_volume_capability(volume_capability)
fake_backend.update_from_volume_capability(volume_capability)
# Backend level stats remain uninitialized
self.assertEqual(0, fake_host.total_capacity_gb)
self.assertIsNone(fake_host.free_capacity_gb)
self.assertEqual(0, fake_backend.total_capacity_gb)
self.assertIsNone(fake_backend.free_capacity_gb)
# Pool stats has been updated
self.assertEqual(1024, fake_host.pools['_pool0'].total_capacity_gb)
self.assertEqual(512, fake_host.pools['_pool0'].free_capacity_gb)
self.assertEqual(1024, fake_backend.pools['_pool0'].total_capacity_gb)
self.assertEqual(512, fake_backend.pools['_pool0'].free_capacity_gb)
self.assertEqual(512,
fake_host.pools['_pool0'].provisioned_capacity_gb)
fake_backend.pools['_pool0'].provisioned_capacity_gb)

# Test update for existing host state
volume_capability.update(dict(total_capacity_gb=1000))
fake_host.update_from_volume_capability(volume_capability)
self.assertEqual(1000, fake_host.pools['_pool0'].total_capacity_gb)
fake_backend.update_from_volume_capability(volume_capability)
self.assertEqual(1000, fake_backend.pools['_pool0'].total_capacity_gb)

# Test update for existing host state with different backend name
volume_capability.update(dict(volume_backend_name='magic'))
fake_host.update_from_volume_capability(volume_capability)
self.assertEqual(1000, fake_host.pools['magic'].total_capacity_gb)
self.assertEqual(512, fake_host.pools['magic'].free_capacity_gb)
fake_backend.update_from_volume_capability(volume_capability)
self.assertEqual(1000, fake_backend.pools['magic'].total_capacity_gb)
self.assertEqual(512, fake_backend.pools['magic'].free_capacity_gb)
self.assertEqual(512,
fake_host.pools['magic'].provisioned_capacity_gb)
fake_backend.pools['magic'].provisioned_capacity_gb)
# 'pool0' becomes nonactive pool, and is deleted
self.assertRaises(KeyError, lambda: fake_host.pools['pool0'])
self.assertRaises(KeyError, lambda: fake_backend.pools['pool0'])

def test_update_from_volume_capability_with_pools(self):
fake_host = host_manager.HostState('host1', None)
self.assertIsNone(fake_host.free_capacity_gb)
fake_backend = host_manager.BackendState('host1', None)
self.assertIsNone(fake_backend.free_capacity_gb)
capability = {
'volume_backend_name': 'Local iSCSI',
'vendor_name': 'OpenStack',
@ -1037,27 +1041,28 @@ class HostStateTestCase(test.TestCase):
'timestamp': None,
}

fake_host.update_from_volume_capability(capability)
fake_backend.update_from_volume_capability(capability)

self.assertEqual('Local iSCSI', fake_host.volume_backend_name)
self.assertEqual('iSCSI', fake_host.storage_protocol)
self.assertEqual('OpenStack', fake_host.vendor_name)
self.assertEqual('1.0.1', fake_host.driver_version)
self.assertEqual('Local iSCSI', fake_backend.volume_backend_name)
self.assertEqual('iSCSI', fake_backend.storage_protocol)
self.assertEqual('OpenStack', fake_backend.vendor_name)
self.assertEqual('1.0.1', fake_backend.driver_version)

# Backend level stats remain uninitialized
self.assertEqual(0, fake_host.total_capacity_gb)
self.assertIsNone(fake_host.free_capacity_gb)
self.assertEqual(0, fake_backend.total_capacity_gb)
self.assertIsNone(fake_backend.free_capacity_gb)
# Pool stats has been updated
self.assertEqual(2, len(fake_host.pools))
self.assertEqual(2, len(fake_backend.pools))

self.assertEqual(500, fake_host.pools['1st pool'].total_capacity_gb)
self.assertEqual(230, fake_host.pools['1st pool'].free_capacity_gb)
self.assertEqual(270,
fake_host.pools['1st pool'].provisioned_capacity_gb)
self.assertEqual(1024, fake_host.pools['2nd pool'].total_capacity_gb)
self.assertEqual(1024, fake_host.pools['2nd pool'].free_capacity_gb)
self.assertEqual(0,
fake_host.pools['2nd pool'].provisioned_capacity_gb)
self.assertEqual(500, fake_backend.pools['1st pool'].total_capacity_gb)
self.assertEqual(230, fake_backend.pools['1st pool'].free_capacity_gb)
self.assertEqual(
270, fake_backend.pools['1st pool'].provisioned_capacity_gb)
self.assertEqual(
1024, fake_backend.pools['2nd pool'].total_capacity_gb)
self.assertEqual(1024, fake_backend.pools['2nd pool'].free_capacity_gb)
self.assertEqual(
0, fake_backend.pools['2nd pool'].provisioned_capacity_gb)

capability = {
'volume_backend_name': 'Local iSCSI',
@ -1077,83 +1082,85 @@ class HostStateTestCase(test.TestCase):
'timestamp': None,
}

# test update HostState Record
fake_host.update_from_volume_capability(capability)
# test update BackendState Record
fake_backend.update_from_volume_capability(capability)

self.assertEqual('1.0.2', fake_host.driver_version)
self.assertEqual('1.0.2', fake_backend.driver_version)

# Non-active pool stats has been removed
self.assertEqual(1, len(fake_host.pools))
self.assertEqual(1, len(fake_backend.pools))

self.assertRaises(KeyError, lambda: fake_host.pools['1st pool'])
self.assertRaises(KeyError, lambda: fake_host.pools['2nd pool'])
self.assertRaises(KeyError, lambda: fake_backend.pools['1st pool'])
self.assertRaises(KeyError, lambda: fake_backend.pools['2nd pool'])

self.assertEqual(10000, fake_host.pools['3rd pool'].total_capacity_gb)
self.assertEqual(10000, fake_host.pools['3rd pool'].free_capacity_gb)
self.assertEqual(0,
fake_host.pools['3rd pool'].provisioned_capacity_gb)
self.assertEqual(10000,
fake_backend.pools['3rd pool'].total_capacity_gb)
self.assertEqual(10000,
fake_backend.pools['3rd pool'].free_capacity_gb)
self.assertEqual(
0, fake_backend.pools['3rd pool'].provisioned_capacity_gb)

def test_update_from_volume_infinite_capability(self):
fake_host = host_manager.HostState('host1', None)
self.assertIsNone(fake_host.free_capacity_gb)
fake_backend = host_manager.BackendState('host1', None)
self.assertIsNone(fake_backend.free_capacity_gb)

volume_capability = {'total_capacity_gb': 'infinite',
'free_capacity_gb': 'infinite',
'reserved_percentage': 0,
'timestamp': None}

fake_host.update_from_volume_capability(volume_capability)
fake_backend.update_from_volume_capability(volume_capability)
# Backend level stats remain uninitialized
self.assertEqual(0, fake_host.total_capacity_gb)
self.assertIsNone(fake_host.free_capacity_gb)
self.assertEqual(0, fake_backend.total_capacity_gb)
self.assertIsNone(fake_backend.free_capacity_gb)
# Pool stats has been updated
self.assertEqual(
'infinite',
fake_host.pools['_pool0'].total_capacity_gb)
fake_backend.pools['_pool0'].total_capacity_gb)
self.assertEqual(
'infinite',
fake_host.pools['_pool0'].free_capacity_gb
|
||||
fake_backend.pools['_pool0'].free_capacity_gb)
|
||||
|
||||
def test_update_from_volume_unknown_capability(self):
|
||||
fake_host = host_manager.HostState('host1', None)
|
||||
self.assertIsNone(fake_host.free_capacity_gb)
|
||||
fake_backend = host_manager.BackendState('host1', None)
|
||||
self.assertIsNone(fake_backend.free_capacity_gb)
|
||||
|
||||
volume_capability = {'total_capacity_gb': 'infinite',
|
||||
'free_capacity_gb': 'unknown',
|
||||
'reserved_percentage': 0,
|
||||
'timestamp': None}
|
||||
|
||||
fake_host.update_from_volume_capability(volume_capability)
|
||||
fake_backend.update_from_volume_capability(volume_capability)
|
||||
# Backend level stats remain uninitialized
|
||||
self.assertEqual(0, fake_host.total_capacity_gb)
|
||||
self.assertIsNone(fake_host.free_capacity_gb)
|
||||
self.assertEqual(0, fake_backend.total_capacity_gb)
|
||||
self.assertIsNone(fake_backend.free_capacity_gb)
|
||||
# Pool stats has been updated
|
||||
self.assertEqual(
|
||||
'infinite',
|
||||
fake_host.pools['_pool0'].total_capacity_gb)
|
||||
fake_backend.pools['_pool0'].total_capacity_gb)
|
||||
self.assertEqual(
|
||||
'unknown',
|
||||
fake_host.pools['_pool0'].free_capacity_gb)
|
||||
fake_backend.pools['_pool0'].free_capacity_gb)
|
||||
|
||||
def test_update_from_empty_volume_capability(self):
|
||||
fake_host = host_manager.HostState('host1', None)
|
||||
fake_backend = host_manager.BackendState('host1', None)
|
||||
|
||||
vol_cap = {'timestamp': None}
|
||||
|
||||
fake_host.update_from_volume_capability(vol_cap)
|
||||
self.assertEqual(0, fake_host.total_capacity_gb)
|
||||
self.assertIsNone(fake_host.free_capacity_gb)
|
||||
fake_backend.update_from_volume_capability(vol_cap)
|
||||
self.assertEqual(0, fake_backend.total_capacity_gb)
|
||||
self.assertIsNone(fake_backend.free_capacity_gb)
|
||||
# Pool stats has been updated
|
||||
self.assertEqual(0,
|
||||
fake_host.pools['_pool0'].total_capacity_gb)
|
||||
fake_backend.pools['_pool0'].total_capacity_gb)
|
||||
self.assertEqual(0,
|
||||
fake_host.pools['_pool0'].free_capacity_gb)
|
||||
fake_backend.pools['_pool0'].free_capacity_gb)
|
||||
self.assertEqual(0,
|
||||
fake_host.pools['_pool0'].provisioned_capacity_gb)
|
||||
fake_backend.pools['_pool0'].provisioned_capacity_gb)
|
||||
|
||||
|
||||
class PoolStateTestCase(test.TestCase):
|
||||
"""Test case for HostState class."""
|
||||
"""Test case for BackendState class."""
|
||||
|
||||
def test_update_from_volume_capability(self):
|
||||
fake_pool = host_manager.PoolState('host1', None, None, 'pool0')
|
||||
|
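The host_manager hunks above boil down to one behavioural contract: BackendState (the renamed HostState) folds each capability report into per-pool stats, a report without an explicit pool list lands in a default '_pool0' entry, and pools that stop being reported are dropped. Below is a minimal, self-contained sketch of that bookkeeping with illustrative names only; the real logic lives in cinder/scheduler/host_manager.py and also keys the implicit pool off volume_backend_name when one is set, as the 'magic' assertions show.

    # Illustrative sketch only; not the real BackendState implementation.
    class FakeBackendState(object):
        def __init__(self, host):
            self.host = host
            self.total_capacity_gb = 0   # backend-level stats stay coarse
            self.free_capacity_gb = None
            self.pools = {}

        def update_from_volume_capability(self, capability):
            # A report without an explicit 'pools' list is treated as a
            # single implicit pool, shown here as '_pool0'.
            pools = capability.get('pools') or [dict(capability,
                                                     pool_name='_pool0')]
            active = set()
            for pool_cap in pools:
                name = pool_cap.get('pool_name', '_pool0')
                active.add(name)
                self.pools[name] = dict(self.pools.get(name, {}), **pool_cap)
            # Pools that are no longer reported are considered non-active
            # and removed, which is what the KeyError assertions check.
            for stale in set(self.pools) - active:
                del self.pools[stale]


    cap = {'total_capacity_gb': 1024, 'free_capacity_gb': 512,
           'provisioned_capacity_gb': 512, 'timestamp': None}
    state = FakeBackendState('host1')
    state.update_from_volume_capability(cap)
    assert state.pools['_pool0']['free_capacity_gb'] == 512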
@ -126,9 +126,9 @@ class SchedulerManagerTestCase(test.TestCase):
    def test_create_volume_exception_puts_volume_in_error_state(
            self, _mock_volume_update, _mock_message_create,
            _mock_sched_create):
        # Test NoValidHost exception behavior for create_volume.
        # Test NoValidBackend exception behavior for create_volume.
        # Puts the volume in 'error' state and eats the exception.
        _mock_sched_create.side_effect = exception.NoValidHost(reason="")
        _mock_sched_create.side_effect = exception.NoValidBackend(reason="")
        volume = fake_volume.fake_volume_obj(self.context)
        request_spec = {'volume_id': volume.id,
                        'volume': {'id': volume.id, '_name_id': None,
@ -223,39 +223,39 @@ class SchedulerManagerTestCase(test.TestCase):
        self.assertFalse(_mock_sleep.called)

    @mock.patch('cinder.db.volume_get')
    @mock.patch('cinder.scheduler.driver.Scheduler.host_passes_filters')
    @mock.patch('cinder.scheduler.driver.Scheduler.backend_passes_filters')
    @mock.patch('cinder.db.volume_update')
    def test_migrate_volume_exception_returns_volume_state(
            self, _mock_volume_update, _mock_host_passes,
            self, _mock_volume_update, _mock_backend_passes,
            _mock_volume_get):
        # Test NoValidHost exception behavior for migrate_volume_to_host.
        # Test NoValidBackend exception behavior for migrate_volume_to_host.
        # Puts the volume in 'error_migrating' state and eats the exception.
        fake_updates = {'migration_status': 'error'}
        self._test_migrate_volume_exception_returns_volume_state(
            _mock_volume_update, _mock_host_passes, _mock_volume_get,
            _mock_volume_update, _mock_backend_passes, _mock_volume_get,
            'available', fake_updates)

    @mock.patch('cinder.db.volume_get')
    @mock.patch('cinder.scheduler.driver.Scheduler.host_passes_filters')
    @mock.patch('cinder.scheduler.driver.Scheduler.backend_passes_filters')
    @mock.patch('cinder.db.volume_update')
    def test_migrate_volume_exception_returns_volume_state_maintenance(
            self, _mock_volume_update, _mock_host_passes,
            self, _mock_volume_update, _mock_backend_passes,
            _mock_volume_get):
        fake_updates = {'status': 'available',
                        'migration_status': 'error'}
        self._test_migrate_volume_exception_returns_volume_state(
            _mock_volume_update, _mock_host_passes, _mock_volume_get,
            _mock_volume_update, _mock_backend_passes, _mock_volume_get,
            'maintenance', fake_updates)

    def _test_migrate_volume_exception_returns_volume_state(
            self, _mock_volume_update, _mock_host_passes,
            self, _mock_volume_update, _mock_backend_passes,
            _mock_volume_get, status, fake_updates):
        volume = tests_utils.create_volume(self.context,
                                           status=status,
                                           previous_status='available')
        fake_volume_id = volume.id
        request_spec = {'volume_id': fake_volume_id}
        _mock_host_passes.side_effect = exception.NoValidHost(reason="")
        _mock_backend_passes.side_effect = exception.NoValidBackend(reason="")
        _mock_volume_get.return_value = volume

        self.manager.migrate_volume_to_host(self.context, volume, 'host', True,
@ -264,15 +264,15 @@ class SchedulerManagerTestCase(test.TestCase):
        _mock_volume_update.assert_called_once_with(self.context,
                                                    fake_volume_id,
                                                    fake_updates)
        _mock_host_passes.assert_called_once_with(self.context, 'host',
                                                  request_spec, {})
        _mock_backend_passes.assert_called_once_with(self.context, 'host',
                                                     request_spec, {})

    @mock.patch('cinder.db.volume_update')
    @mock.patch('cinder.db.volume_attachment_get_all_by_volume_id')
    @mock.patch('cinder.quota.QUOTAS.rollback')
    def test_retype_volume_exception_returns_volume_state(
            self, quota_rollback, _mock_vol_attachment_get, _mock_vol_update):
        # Test NoValidHost exception behavior for retype.
        # Test NoValidBackend exception behavior for retype.
        # Puts the volume in original state and eats the exception.
        volume = tests_utils.create_volume(self.context,
                                           status='retyping',
@ -287,17 +287,17 @@ class SchedulerManagerTestCase(test.TestCase):
                        'migration_policy': 'on-demand',
                        'quota_reservations': reservations}
        _mock_vol_update.return_value = {'status': 'in-use'}
        _mock_find_retype_host = mock.Mock(
            side_effect=exception.NoValidHost(reason=""))
        orig_retype = self.manager.driver.find_retype_host
        self.manager.driver.find_retype_host = _mock_find_retype_host
        _mock_find_retype_backend = mock.Mock(
            side_effect=exception.NoValidBackend(reason=""))
        orig_retype = self.manager.driver.find_retype_backend
        self.manager.driver.find_retype_backend = _mock_find_retype_backend

        self.manager.retype(self.context, volume, request_spec=request_spec,
                            filter_properties={})

        _mock_find_retype_host.assert_called_once_with(self.context,
                                                       request_spec, {},
                                                       'on-demand')
        _mock_find_retype_backend.assert_called_once_with(self.context,
                                                          request_spec, {},
                                                          'on-demand')
        quota_rollback.assert_called_once_with(self.context, reservations)
        _mock_vol_update.assert_called_once_with(self.context, volume.id,
                                                 {'status': 'in-use'})
@ -329,7 +329,7 @@ class SchedulerManagerTestCase(test.TestCase):
        LOG.exception.reset_mock()
        db.consistencygroup_update.reset_mock()

        mock_cg.side_effect = exception.NoValidHost(
        mock_cg.side_effect = exception.NoValidBackend(
            reason="No weighed hosts available")
        self.manager.create_consistencygroup(
            self.context, consistencygroup_obj)
|
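The SchedulerManagerTestCase hunks all assert the same error contract: when the scheduler driver raises NoValidBackend, the manager eats the exception and records the failure on the volume (status 'error' on create, migration_status 'error' on migrate, the original status on retype). A rough stand-alone sketch of that pattern follows; the names are stand-ins, not the actual manager code.

    # Hedged sketch of the error path these tests assert; names are
    # illustrative stand-ins, not cinder's actual manager code.
    class NoValidBackend(Exception):
        """Stand-in for cinder.exception.NoValidBackend."""


    def create_volume_safely(schedule, update_volume, context, volume,
                             request_spec):
        try:
            schedule(context, request_spec, {})
        except NoValidBackend:
            # The exception is eaten and the failure is recorded on the
            # volume, which is what _mock_volume_update is checked for.
            update_volume(context, volume['id'], {'status': 'error'})


    def _always_fails(context, request_spec, filter_properties):
        raise NoValidBackend()


    updates = {}
    create_volume_safely(_always_fails,
                         lambda ctx, vid, vals: updates.update({vid: vals}),
                         context=None, volume={'id': 'vol-1'},
                         request_spec={})
    assert updates == {'vol-1': {'status': 'error'}}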
@ -68,21 +68,21 @@ class VolumeNumberWeigherTestCase(test.TestCase):
                                       weight_properties)[0]

    @mock.patch('cinder.db.sqlalchemy.api.service_get_all')
    def _get_all_hosts(self, _mock_service_get_all, disabled=False):
    def _get_all_backends(self, _mock_service_get_all, disabled=False):
        ctxt = context.get_admin_context()
        fakes.mock_host_manager_db_calls(_mock_service_get_all,
                                         disabled=disabled)
        host_states = self.host_manager.get_all_host_states(ctxt)
        backend_states = self.host_manager.get_all_backend_states(ctxt)
        _mock_service_get_all.assert_called_once_with(
            ctxt,
            None,  # backend_match_level
            topic=constants.VOLUME_TOPIC,
            disabled=disabled)
        return host_states
        return backend_states

    def test_volume_number_weight_multiplier1(self):
        self.flags(volume_number_multiplier=-1.0)
        hostinfo_list = self._get_all_hosts()
        backend_info_list = self._get_all_backends()

        # host1: 1 volume Norm=0.0
        # host2: 2 volumes
@ -92,14 +92,14 @@ class VolumeNumberWeigherTestCase(test.TestCase):
        # so, host1 should win:
        with mock.patch.object(api, 'volume_data_get_for_host',
                               fake_volume_data_get_for_host):
            weighed_host = self._get_weighed_host(hostinfo_list)
            weighed_host = self._get_weighed_host(backend_info_list)
            self.assertEqual(0.0, weighed_host.weight)
            self.assertEqual('host1',
                             utils.extract_host(weighed_host.obj.host))

    def test_volume_number_weight_multiplier2(self):
        self.flags(volume_number_multiplier=1.0)
        hostinfo_list = self._get_all_hosts()
        backend_info_list = self._get_all_backends()

        # host1: 1 volume Norm=0
        # host2: 2 volumes
@ -109,7 +109,7 @@ class VolumeNumberWeigherTestCase(test.TestCase):
        # so, host5 should win:
        with mock.patch.object(api, 'volume_data_get_for_host',
                               fake_volume_data_get_for_host):
            weighed_host = self._get_weighed_host(hostinfo_list)
            weighed_host = self._get_weighed_host(backend_info_list)
            self.assertEqual(1.0, weighed_host.weight)
            self.assertEqual('host5',
                             utils.extract_host(weighed_host.obj.host))
|
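The weigher assertions only make sense given how weights are computed: per-backend volume counts are normalized to [0, 1] and then scaled by volume_number_multiplier, so -1.0 favours the emptiest backend (winning weight 0.0) and 1.0 favours the fullest (winning weight 1.0). A small sketch of that normalize-then-scale math, assuming the 1-through-5 volume counts suggested by the truncated test comments (the counts are an assumption, not part of this hunk):

    # Sketch of the weighing math these multiplier tests rely on; it
    # mirrors the normalize-then-scale idea, not cinder's exact code.
    def normalize(values):
        lo, hi = min(values), max(values)
        if hi == lo:
            return [0.0 for _ in values]
        return [(v - lo) / float(hi - lo) for v in values]


    def weigh_backends(volume_counts, multiplier):
        """Return (weight, index) pairs, highest weight first."""
        weights = [multiplier * w for w in normalize(volume_counts)]
        return sorted(((w, i) for i, w in enumerate(weights)), reverse=True)


    # host1..host5 assumed to carry 1, 2, 3, 4, 5 volumes respectively.
    counts = [1, 2, 3, 4, 5]
    # volume_number_multiplier = -1.0: the emptiest backend (host1) wins
    # with weight 0.0, matching test_volume_number_weight_multiplier1.
    assert weigh_backends(counts, -1.0)[0] == (0.0, 0)
    # volume_number_multiplier = 1.0: the fullest backend (host5) wins
    # with weight 1.0, matching test_volume_number_weight_multiplier2.
    assert weigh_backends(counts, 1.0)[0] == (1.0, 4)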
@ -203,20 +203,30 @@ class DBAPIServiceTestCase(BaseTest):
    def test_service_get_all(self):
        expired = (datetime.datetime.utcnow()
                   - datetime.timedelta(seconds=CONF.service_down_time + 1))
        db.cluster_create(self.ctxt, {'name': 'cluster_disabled',
                                      'binary': 'fake_binary',
                                      'disabled': True})
        db.cluster_create(self.ctxt, {'name': 'cluster_enabled',
                                      'binary': 'fake_binary',
                                      'disabled': False})
        values = [
            # Now we are updating updated_at at creation as well so this one
            # is up.
            {'host': 'host1', 'binary': 'b1', 'created_at': expired},
            {'host': 'host1@ceph', 'binary': 'b2'},
            {'host': 'host2', 'binary': 'b2'},
            {'disabled': False, 'cluster_name': 'cluster_enabled'},
            {'disabled': True, 'cluster_name': 'cluster_enabled'},
            {'disabled': False, 'cluster_name': 'cluster_disabled'},
            {'disabled': True, 'cluster_name': 'cluster_disabled'},
            {'disabled': True, 'created_at': expired, 'updated_at': expired},
        ]
        services = [self._create_service(vals) for vals in values]

        disabled_services = services[-1:]
        non_disabled_services = services[:-1]
        up_services = services[0:3]
        down_services = [services[3]]
        disabled_services = services[-3:]
        non_disabled_services = services[:-3]
        up_services = services[:7]
        down_services = [services[7]]
        expected = services[:2]
        expected_bin = services[1:3]
        compares = [
|
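The reworked slices (disabled_services = services[-3:], up_services = services[:7], and so on) appear to encode the rule that, for a clustered service, the cluster's disabled flag takes precedence over the service's own flag: the service that is itself disabled but lives in cluster_enabled still lands in non_disabled_services. A hedged sketch of that effective-disabled rule as inferred from these expectations; the real filtering is done in SQL inside cinder.db.sqlalchemy.api, not in Python like this.

    # Inferred from the expected slices above; illustrative only.
    def effectively_disabled(service, clusters):
        cluster = clusters.get(service.get('cluster_name'))
        if cluster is not None:
            # For clustered services the cluster's flag wins (assumption
            # drawn from services[-3:] being the disabled set).
            return cluster['disabled']
        return service.get('disabled', False)


    clusters = {'cluster_enabled': {'disabled': False},
                'cluster_disabled': {'disabled': True}}
    svc = {'disabled': True, 'cluster_name': 'cluster_enabled'}
    # Disabled on its own, but its cluster is enabled, so it is expected
    # in non_disabled_services (services[:-3]) above.
    assert effectively_disabled(svc, clusters) is False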
@ -156,15 +156,14 @@ class VolumeRpcAPITestCase(test.TestCase):

        if 'host' in expected_msg:
            del expected_msg['host']
        if 'dest_host' in expected_msg:
            dest_host = expected_msg.pop('dest_host')
            dest_host_dict = {'host': dest_host.host,
                              'cluster_name': dest_host.cluster_name,
                              'capabilities': dest_host.capabilities}
            expected_msg['host'] = dest_host_dict
        if 'dest_backend' in expected_msg:
            dest_backend = expected_msg.pop('dest_backend')
            dest_backend_dict = {'host': dest_backend.host,
                                 'cluster_name': dest_backend.cluster_name,
                                 'capabilities': dest_backend.capabilities}
            expected_msg['host'] = dest_backend_dict
        if 'force_copy' in expected_msg:
            expected_msg['force_host_copy'] = expected_msg.pop('force_copy')

        if 'new_volume' in expected_msg:
            volume = expected_msg['new_volume']
            expected_msg['new_volume_id'] = volume['id']
@ -544,16 +543,16 @@ class VolumeRpcAPITestCase(test.TestCase):

    @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True)
    def test_migrate_volume(self, can_send_version):
        class FakeHost(object):
        class FakeBackend(object):
            def __init__(self):
                self.host = 'host'
                self.cluster_name = 'cluster_name'
                self.capabilities = {}
        dest_host = FakeHost()
        dest_backend = FakeBackend()
        self._test_volume_api('migrate_volume',
                              rpc_method='cast',
                              volume=self.fake_volume_obj,
                              dest_host=dest_host,
                              dest_backend=dest_backend,
                              force_host_copy=True,
                              version='3.5')

@ -567,17 +566,17 @@ class VolumeRpcAPITestCase(test.TestCase):

    @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True)
    def test_retype(self, can_send_version):
        class FakeHost(object):
        class FakeBackend(object):
            def __init__(self):
                self.host = 'host'
                self.cluster_name = 'cluster_name'
                self.capabilities = {}
        dest_host = FakeHost()
        dest_backend = FakeBackend()
        self._test_volume_api('retype',
                              rpc_method='cast',
                              volume=self.fake_volume_obj,
                              new_type_id='fake',
                              dest_host=dest_host,
                              dest_backend=dest_backend,
                              migration_policy='never',
                              reservations=self.fake_reservations,
                              old_reservations=self.fake_reservations,
@ -608,7 +607,7 @@ class VolumeRpcAPITestCase(test.TestCase):
                              rpc_method='cast',
                              snapshot=my_fake_snapshot_obj,
                              ref='foo',
                              host='fake_host',
                              backend='fake_host',
                              version='3.0')

    def test_freeze_host(self):
|
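In the RPC tests the dest_host object becomes dest_backend, but the wire message still carries a plain dict under the legacy 'host' key; _test_volume_api simply flattens the object the same way the client does in the rpcapi hunks further below. A small self-contained illustration of that flattening:

    # Mirrors the dest_backend handling in the test helper above and the
    # backend_p dict built in cinder/volume/rpcapi.py; illustrative only.
    def backend_to_rpc_dict(dest_backend):
        return {'host': dest_backend.host,
                'cluster_name': dest_backend.cluster_name,
                'capabilities': dest_backend.capabilities}


    class FakeBackend(object):
        def __init__(self):
            self.host = 'host'
            self.cluster_name = 'cluster_name'
            self.capabilities = {}


    # The message keeps the legacy 'host' key, which is why the helper
    # rewrites expected_msg['host'] rather than adding a new key.
    assert backend_to_rpc_dict(FakeBackend()) == {
        'host': 'host',
        'cluster_name': 'cluster_name',
        'capabilities': {}}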
@ -1164,7 +1164,7 @@ class FlashSystemDriverTestCase(test.TestCase):

        # case 3: host name is neither unicode nor string
        conn = {'host': 12345}
        self.assertRaises(exception.NoValidHost,
        self.assertRaises(exception.NoValidBackend,
                          self.driver._connector_to_hostname_prefix,
                          conn)

|
@ -350,7 +350,7 @@ class NetAppESeriesDriverTestCase(object):
                       mock.Mock(side_effect=socket.gaierror))

        self.assertRaises(
            exception.NoValidHost,
            exception.NoValidBackend,
            driver.library._check_mode_get_or_register_storage_system)

    def test_setup_error_invalid_first_controller_ip(self):
@ -361,7 +361,7 @@ class NetAppESeriesDriverTestCase(object):
                       mock.Mock(side_effect=socket.gaierror))

        self.assertRaises(
            exception.NoValidHost,
            exception.NoValidBackend,
            driver.library._check_mode_get_or_register_storage_system)

    def test_setup_error_invalid_second_controller_ip(self):
@ -372,7 +372,7 @@ class NetAppESeriesDriverTestCase(object):
                       mock.Mock(side_effect=socket.gaierror))

        self.assertRaises(
            exception.NoValidHost,
            exception.NoValidBackend,
            driver.library._check_mode_get_or_register_storage_system)

    def test_setup_error_invalid_both_controller_ips(self):
@ -383,7 +383,7 @@ class NetAppESeriesDriverTestCase(object):
                       mock.Mock(side_effect=socket.gaierror))

        self.assertRaises(
            exception.NoValidHost,
            exception.NoValidBackend,
            driver.library._check_mode_get_or_register_storage_system)

    def test_manage_existing_get_size(self):

|
@ -189,7 +189,7 @@ class FlashSystemDriver(san.SanDriver,
            msg = _('_create_host: Can not translate host name. Host name '
                    'is not unicode or string.')
            LOG.error(msg)
            raise exception.NoValidHost(reason=msg)
            raise exception.NoValidBackend(reason=msg)

        host_name = six.text_type(host_name)

|
@ -259,7 +259,7 @@ class NetAppESeriesLibrary(object):
        except socket.gaierror as e:
            LOG.error(_LE('Error resolving host %(host)s. Error - %(e)s.'),
                      {'host': host, 'e': e})
            raise exception.NoValidHost(
            raise exception.NoValidBackend(
                _("Controller IP '%(host)s' could not be resolved: %(e)s.")
                % {'host': host, 'e': e})
|
@ -244,10 +244,10 @@ class VolumeAPI(rpc.RPCAPI):
        cctxt.cast(ctxt, 'extend_volume', volume=volume, new_size=new_size,
                   reservations=reservations)

    def migrate_volume(self, ctxt, volume, dest_host, force_host_copy):
        backend_p = {'host': dest_host.host,
                     'cluster_name': dest_host.cluster_name,
                     'capabilities': dest_host.capabilities}
    def migrate_volume(self, ctxt, volume, dest_backend, force_host_copy):
        backend_p = {'host': dest_backend.host,
                     'cluster_name': dest_backend.cluster_name,
                     'capabilities': dest_backend.capabilities}

        version = '3.5'
        if not self.client.can_send_version(version):
@ -263,12 +263,12 @@ class VolumeAPI(rpc.RPCAPI):
        return cctxt.call(ctxt, 'migrate_volume_completion', volume=volume,
                          new_volume=new_volume, error=error,)

    def retype(self, ctxt, volume, new_type_id, dest_host,
    def retype(self, ctxt, volume, new_type_id, dest_backend,
               migration_policy='never', reservations=None,
               old_reservations=None):
        backend_p = {'host': dest_host.host,
                     'cluster_name': dest_host.cluster_name,
                     'capabilities': dest_host.capabilities}
        backend_p = {'host': dest_backend.host,
                     'cluster_name': dest_backend.cluster_name,
                     'capabilities': dest_backend.capabilities}
        version = '3.5'
        if not self.client.can_send_version(version):
            version = '3.0'
@ -308,8 +308,8 @@ class VolumeAPI(rpc.RPCAPI):
        cctxt.cast(ctxt, 'failover_host',
                   secondary_backend_id=secondary_backend_id)

    def manage_existing_snapshot(self, ctxt, snapshot, ref, host):
        cctxt = self._get_cctxt(host)
    def manage_existing_snapshot(self, ctxt, snapshot, ref, backend):
        cctxt = self._get_cctxt(backend)
        cctxt.cast(ctxt, 'manage_existing_snapshot',
                   snapshot=snapshot,
                   ref=ref)
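migrate_volume() and retype() now take a dest_backend, build the same backend_p dict, and then negotiate the RPC version with can_send_version(). The 3.0 fallback body is not part of these hunks, so the downgrade step in the sketch below is an assumption kept deliberately simple; it illustrates the pattern rather than reproducing the real method.

    # Hedged sketch of the can_send_version() negotiation pattern; the
    # exact 3.0 payload is assumed (older peers predate clustering).
    def build_migrate_payload(client, dest_backend):
        backend_p = {'host': dest_backend.host,
                     'cluster_name': dest_backend.cluster_name,
                     'capabilities': dest_backend.capabilities}
        version = '3.5'
        if not client.can_send_version(version):
            # Assumed fallback: send only what a 3.0 volume service
            # understands by dropping the cluster_name field.
            version = '3.0'
            backend_p.pop('cluster_name', None)
        return version, backend_p


    class FakeClient(object):
        def __init__(self, supported):
            self._supported = supported

        def can_send_version(self, version):
            return version in self._supported


    class FakeBackend(object):
        host = 'host@lvm'
        cluster_name = 'cluster1'
        capabilities = {}


    assert build_migrate_payload(FakeClient({'3.5'}),
                                 FakeBackend())[0] == '3.5'
    assert 'cluster_name' not in build_migrate_payload(FakeClient(set()),
                                                       FakeBackend())[1]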