Merge "Cosmetic changes to scheduler"

commit 38b54d26f3
Author: Jenkins (committed by Gerrit Code Review)
Date: 2016-12-16 17:04:36 +00:00
36 changed files with 1138 additions and 1086 deletions

View File

@ -574,8 +574,8 @@ class PasteAppNotFound(NotFound):
message = _("Could not load paste app '%(name)s' from %(path)s") message = _("Could not load paste app '%(name)s' from %(path)s")
class NoValidHost(CinderException): class NoValidBackend(CinderException):
message = _("No valid host was found. %(reason)s") message = _("No valid backend was found. %(reason)s")
class NoMoreTargets(CinderException): class NoMoreTargets(CinderException):

View File

@ -115,15 +115,21 @@ class Scheduler(object):
host, host,
capabilities) capabilities)
def host_passes_filters(self, context, host, request_spec, def host_passes_filters(self, context, backend, request_spec,
filter_properties): filter_properties):
"""Check if the specified host passes the filters.""" """Check if the specified backend passes the filters."""
raise NotImplementedError(_("Must implement host_passes_filters")) raise NotImplementedError(_("Must implement backend_passes_filters"))
def find_retype_host(self, context, request_spec, filter_properties=None, def find_retype_host(self, context, request_spec, filter_properties=None,
migration_policy='never'): migration_policy='never'):
"""Find a host that can accept the volume with its new type.""" """Find a backend that can accept the volume with its new type."""
raise NotImplementedError(_("Must implement find_retype_host")) raise NotImplementedError(_("Must implement find_retype_backend"))
# NOTE(geguileo): For backward compatibility with out of tree Schedulers
# we don't change host_passes_filters or find_retype_host method names but
# create an "alias" for them with the right name instead.
backend_passes_filters = host_passes_filters
find_retype_backend = find_retype_host
def schedule(self, context, topic, method, *_args, **_kwargs): def schedule(self, context, topic, method, *_args, **_kwargs):
"""Must override schedule method for scheduler to work.""" """Must override schedule method for scheduler to work."""

View File

@ -66,15 +66,16 @@ class FilterScheduler(driver.Scheduler):
request_spec_list, request_spec_list,
filter_properties_list): filter_properties_list):
weighed_host = self._schedule_group( weighed_backend = self._schedule_group(
context, context,
request_spec_list, request_spec_list,
filter_properties_list) filter_properties_list)
if not weighed_host: if not weighed_backend:
raise exception.NoValidHost(reason=_("No weighed hosts available")) raise exception.NoValidBackend(reason=_("No weighed backends "
"available"))
backend = weighed_host.obj backend = weighed_backend.obj
updated_group = driver.group_update_db(context, group, backend.host, updated_group = driver.group_update_db(context, group, backend.host,
backend.cluster_name) backend.cluster_name)
@ -85,17 +86,18 @@ class FilterScheduler(driver.Scheduler):
request_spec_list, request_spec_list,
group_filter_properties, group_filter_properties,
filter_properties_list): filter_properties_list):
weighed_host = self._schedule_generic_group( weighed_backend = self._schedule_generic_group(
context, context,
group_spec, group_spec,
request_spec_list, request_spec_list,
group_filter_properties, group_filter_properties,
filter_properties_list) filter_properties_list)
if not weighed_host: if not weighed_backend:
raise exception.NoValidHost(reason=_("No weighed hosts available")) raise exception.NoValidBackend(reason=_("No weighed backends "
"available"))
backend = weighed_host.obj backend = weighed_backend.obj
updated_group = driver.generic_group_update_db(context, group, updated_group = driver.generic_group_update_db(context, group,
backend.host, backend.host,
@ -104,13 +106,13 @@ class FilterScheduler(driver.Scheduler):
self.volume_rpcapi.create_group(context, updated_group) self.volume_rpcapi.create_group(context, updated_group)
def schedule_create_volume(self, context, request_spec, filter_properties): def schedule_create_volume(self, context, request_spec, filter_properties):
weighed_host = self._schedule(context, request_spec, backend = self._schedule(context, request_spec, filter_properties)
filter_properties)
if not weighed_host: if not backend:
raise exception.NoValidHost(reason=_("No weighed hosts available")) raise exception.NoValidBackend(reason=_("No weighed backends "
"available"))
backend = weighed_host.obj backend = backend.obj
volume_id = request_spec['volume_id'] volume_id = request_spec['volume_id']
updated_volume = driver.volume_update_db(context, volume_id, updated_volume = driver.volume_update_db(context, volume_id,
@ -126,25 +128,25 @@ class FilterScheduler(driver.Scheduler):
filter_properties, filter_properties,
allow_reschedule=True) allow_reschedule=True)
def host_passes_filters(self, context, host, request_spec, def backend_passes_filters(self, context, backend, request_spec,
filter_properties): filter_properties):
"""Check if the specified host passes the filters.""" """Check if the specified backend passes the filters."""
weighed_hosts = self._get_weighted_candidates(context, request_spec, weighed_backends = self._get_weighted_candidates(context, request_spec,
filter_properties) filter_properties)
for weighed_host in weighed_hosts: for weighed_backend in weighed_backends:
host_state = weighed_host.obj backend_state = weighed_backend.obj
if host_state.backend_id == host: if backend_state.backend_id == backend:
return host_state return backend_state
volume_id = request_spec.get('volume_id', '??volume_id missing??') volume_id = request_spec.get('volume_id', '??volume_id missing??')
raise exception.NoValidHost(reason=_('Cannot place volume %(id)s on ' raise exception.NoValidBackend(reason=_('Cannot place volume %(id)s '
'%(host)s') % 'on %(backend)s') %
{'id': volume_id, {'id': volume_id,
'host': host}) 'backend': backend})
def find_retype_host(self, context, request_spec, filter_properties=None, def find_retype_backend(self, context, request_spec,
migration_policy='never'): filter_properties=None, migration_policy='never'):
"""Find a host that can accept the volume with its new type.""" """Find a backend that can accept the volume with its new type."""
filter_properties = filter_properties or {} filter_properties = filter_properties or {}
backend = (request_spec['volume_properties'].get('cluster_name') backend = (request_spec['volume_properties'].get('cluster_name')
or request_spec['volume_properties']['host']) or request_spec['volume_properties']['host'])
@ -156,10 +158,10 @@ class FilterScheduler(driver.Scheduler):
weighed_backends = self._get_weighted_candidates(context, request_spec, weighed_backends = self._get_weighted_candidates(context, request_spec,
filter_properties) filter_properties)
if not weighed_backends: if not weighed_backends:
raise exception.NoValidHost(reason=_('No valid hosts for volume ' raise exception.NoValidBackend(
'%(id)s with type %(type)s') % reason=_('No valid backends for volume %(id)s with type '
{'id': request_spec['volume_id'], '%(type)s') % {'id': request_spec['volume_id'],
'type': request_spec['volume_type']}) 'type': request_spec['volume_type']})
for weighed_backend in weighed_backends: for weighed_backend in weighed_backends:
backend_state = weighed_backend.obj backend_state = weighed_backend.obj
@ -183,31 +185,30 @@ class FilterScheduler(driver.Scheduler):
return backend_state return backend_state
if migration_policy == 'never': if migration_policy == 'never':
raise exception.NoValidHost(reason=_('Current host not valid for ' raise exception.NoValidBackend(
'volume %(id)s with type ' reason=_('Current backend not valid for volume %(id)s with '
'%(type)s, migration not ' 'type %(type)s, migration not allowed') %
'allowed') % {'id': request_spec['volume_id'],
{'id': request_spec['volume_id'], 'type': request_spec['volume_type']})
'type': request_spec['volume_type']})
top_host = self._choose_top_host(weighed_backends, request_spec) top_backend = self._choose_top_backend(weighed_backends, request_spec)
return top_host.obj return top_backend.obj
def get_pools(self, context, filters): def get_pools(self, context, filters):
# TODO(zhiteng) Add filters support # TODO(zhiteng) Add filters support
return self.host_manager.get_pools(context) return self.host_manager.get_pools(context)
def _post_select_populate_filter_properties(self, filter_properties, def _post_select_populate_filter_properties(self, filter_properties,
host_state): backend_state):
"""Populate filter properties with additional information. """Populate filter properties with additional information.
Add additional information to the filter properties after a host has Add additional information to the filter properties after a backend has
been selected by the scheduling process. been selected by the scheduling process.
""" """
# Add a retry entry for the selected volume backend: # Add a retry entry for the selected volume backend:
self._add_retry_host(filter_properties, host_state.backend_id) self._add_retry_backend(filter_properties, backend_state.backend_id)
def _add_retry_host(self, filter_properties, host): def _add_retry_backend(self, filter_properties, backend):
"""Add a retry entry for the selected volume backend. """Add a retry entry for the selected volume backend.
In the event that the request gets re-scheduled, this entry will signal In the event that the request gets re-scheduled, this entry will signal
@ -216,8 +217,11 @@ class FilterScheduler(driver.Scheduler):
retry = filter_properties.get('retry', None) retry = filter_properties.get('retry', None)
if not retry: if not retry:
return return
hosts = retry['hosts'] # TODO(geguileo): In P - change to only use backends
hosts.append(host) for key in ('hosts', 'backends'):
backends = retry.get(key)
if backends is not None:
backends.append(backend)
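The hunk above changes the retry bookkeeping so that, during the transition, the selected backend is recorded under whichever of the legacy 'hosts' and new 'backends' keys the retry dict carries. A standalone sketch with made-up values:

    # Standalone sketch of the dual-key retry update shown above
    # (made-up backend name; mirrors the new code, not a drop-in module).
    def _add_retry_backend(filter_properties, backend):
        retry = filter_properties.get('retry', None)
        if not retry:
            return
        # Transitional behaviour: update both the legacy and the new key.
        for key in ('hosts', 'backends'):
            backends = retry.get(key)
            if backends is not None:
                backends.append(backend)

    props = {'retry': {'num_attempts': 1, 'backends': [], 'hosts': []}}
    _add_retry_backend(props, 'host@lvmdriver-1#pool0')
    print(props['retry']['hosts'] == props['retry']['backends'])  # True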
def _max_attempts(self): def _max_attempts(self):
max_attempts = CONF.scheduler_max_attempts max_attempts = CONF.scheduler_max_attempts
@ -233,21 +237,22 @@ class FilterScheduler(driver.Scheduler):
if not exc: if not exc:
return # no exception info from a previous attempt, skip return # no exception info from a previous attempt, skip
hosts = retry.get('hosts', None) # TODO(geguileo): In P - change to hosts = retry.get('backends')
if not hosts: backends = retry.get('backends', retry.get('hosts'))
if not backends:
return # no previously attempted hosts, skip return # no previously attempted hosts, skip
last_host = hosts[-1] last_backend = backends[-1]
LOG.error(_LE("Error scheduling %(volume_id)s from last vol-service: " LOG.error(_LE("Error scheduling %(volume_id)s from last vol-service: "
"%(last_host)s : %(exc)s"), "%(last_backend)s : %(exc)s"),
{'volume_id': volume_id, {'volume_id': volume_id,
'last_host': last_host, 'last_backend': last_backend,
'exc': exc}) 'exc': exc})
def _populate_retry(self, filter_properties, properties): def _populate_retry(self, filter_properties, properties):
"""Populate filter properties with history of retries for request. """Populate filter properties with history of retries for request.
If maximum retries is exceeded, raise NoValidHost. If maximum retries is exceeded, raise NoValidBackend.
""" """
max_attempts = self.max_attempts max_attempts = self.max_attempts
retry = filter_properties.pop('retry', {}) retry = filter_properties.pop('retry', {})
@ -262,7 +267,8 @@ class FilterScheduler(driver.Scheduler):
else: else:
retry = { retry = {
'num_attempts': 1, 'num_attempts': 1,
'hosts': [] # list of volume service hosts tried 'backends': [], # list of volume service backends tried
'hosts': [] # TODO(geguileo): Remove in P and leave backends
} }
filter_properties['retry'] = retry filter_properties['retry'] = retry
@ -270,7 +276,7 @@ class FilterScheduler(driver.Scheduler):
self._log_volume_error(volume_id, retry) self._log_volume_error(volume_id, retry)
if retry['num_attempts'] > max_attempts: if retry['num_attempts'] > max_attempts:
raise exception.NoValidHost( raise exception.NoValidBackend(
reason=_("Exceeded max scheduling attempts %(max_attempts)d " reason=_("Exceeded max scheduling attempts %(max_attempts)d "
"for volume %(volume_id)s") % "for volume %(volume_id)s") %
{'max_attempts': max_attempts, {'max_attempts': max_attempts,
@ -278,7 +284,7 @@ class FilterScheduler(driver.Scheduler):
def _get_weighted_candidates(self, context, request_spec, def _get_weighted_candidates(self, context, request_spec,
filter_properties=None): filter_properties=None):
"""Return a list of hosts that meet required specs. """Return a list of backends that meet required specs.
Returned list is ordered by their fitness. Returned list is ordered by their fitness.
""" """
@ -320,26 +326,26 @@ class FilterScheduler(driver.Scheduler):
resource_type['extra_specs'].update( resource_type['extra_specs'].update(
multiattach='<is> True') multiattach='<is> True')
# Find our local list of acceptable hosts by filtering and # Find our local list of acceptable backends by filtering and
# weighing our options. we virtually consume resources on # weighing our options. we virtually consume resources on
# it so subsequent selections can adjust accordingly. # it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only # Note: remember, we are using an iterator here. So only
# traverse this list once. # traverse this list once.
hosts = self.host_manager.get_all_host_states(elevated) backends = self.host_manager.get_all_backend_states(elevated)
# Filter local hosts based on requirements ... # Filter local hosts based on requirements ...
hosts = self.host_manager.get_filtered_hosts(hosts, backends = self.host_manager.get_filtered_backends(backends,
filter_properties) filter_properties)
if not hosts: if not backends:
return [] return []
LOG.debug("Filtered %s", hosts) LOG.debug("Filtered %s", backends)
# weighted_host = WeightedHost() ... the best # weighted_backends = WeightedHost() ... the best
# host for the job. # backend for the job.
weighed_hosts = self.host_manager.get_weighed_hosts(hosts, weighed_backends = self.host_manager.get_weighed_backends(
filter_properties) backends, filter_properties)
return weighed_hosts return weighed_backends
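Condensing the new side of this hunk, candidate selection is a three-step pipeline: collect every backend state once, filter against the request, then weigh the survivors. A compressed sketch (assuming only a host_manager object exposing the three methods referenced above; spec munging, CG handling and logging from the real method are omitted):

    # Compressed view of the filter-then-weigh flow in
    # _get_weighted_candidates (sketch only).
    def get_weighted_candidates(host_manager, elevated_context,
                                filter_properties):
        backends = host_manager.get_all_backend_states(elevated_context)
        backends = host_manager.get_filtered_backends(backends,
                                                      filter_properties)
        if not backends:
            return []
        # Returned items are weighed wrappers; .obj holds the backend state.
        return host_manager.get_weighed_backends(backends, filter_properties)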
def _get_weighted_candidates_group(self, context, request_spec_list, def _get_weighted_candidates_group(self, context, request_spec_list,
filter_properties_list=None): filter_properties_list=None):
@ -350,7 +356,7 @@ class FilterScheduler(driver.Scheduler):
""" """
elevated = context.elevated() elevated = context.elevated()
weighed_hosts = [] weighed_backends = []
index = 0 index = 0
for request_spec in request_spec_list: for request_spec in request_spec_list:
volume_properties = request_spec['volume_properties'] volume_properties = request_spec['volume_properties']
@ -388,67 +394,67 @@ class FilterScheduler(driver.Scheduler):
self.populate_filter_properties(request_spec, self.populate_filter_properties(request_spec,
filter_properties) filter_properties)
# Find our local list of acceptable hosts by filtering and # Find our local list of acceptable backends by filtering and
# weighing our options. we virtually consume resources on # weighing our options. we virtually consume resources on
# it so subsequent selections can adjust accordingly. # it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only # Note: remember, we are using an iterator here. So only
# traverse this list once. # traverse this list once.
all_hosts = self.host_manager.get_all_host_states(elevated) all_backends = self.host_manager.get_all_backend_states(elevated)
if not all_hosts: if not all_backends:
return [] return []
# Filter local hosts based on requirements ... # Filter local backends based on requirements ...
hosts = self.host_manager.get_filtered_hosts(all_hosts, backends = self.host_manager.get_filtered_backends(
filter_properties) all_backends, filter_properties)
if not hosts: if not backends:
return [] return []
LOG.debug("Filtered %s", hosts) LOG.debug("Filtered %s", backends)
# weighted_host = WeightedHost() ... the best # weighted_host = WeightedHost() ... the best
# host for the job. # host for the job.
temp_weighed_hosts = self.host_manager.get_weighed_hosts( temp_weighed_backends = self.host_manager.get_weighed_backends(
hosts, backends,
filter_properties) filter_properties)
if not temp_weighed_hosts: if not temp_weighed_backends:
return [] return []
if index == 0: if index == 0:
weighed_hosts = temp_weighed_hosts weighed_backends = temp_weighed_backends
else: else:
new_weighed_hosts = [] new_weighed_backends = []
for host1 in weighed_hosts: for backend1 in weighed_backends:
for host2 in temp_weighed_hosts: for backend2 in temp_weighed_backends:
# Should schedule creation of CG on backend level, # Should schedule creation of CG on backend level,
# not pool level. # not pool level.
if (utils.extract_host(host1.obj.backend_id) == if (utils.extract_host(backend1.obj.backend_id) ==
utils.extract_host(host2.obj.backend_id)): utils.extract_host(backend2.obj.backend_id)):
new_weighed_hosts.append(host1) new_weighed_backends.append(backend1)
weighed_hosts = new_weighed_hosts weighed_backends = new_weighed_backends
if not weighed_hosts: if not weighed_backends:
return [] return []
index += 1 index += 1
return weighed_hosts return weighed_backends
def _get_weighted_candidates_generic_group( def _get_weighted_candidates_generic_group(
self, context, group_spec, request_spec_list, self, context, group_spec, request_spec_list,
group_filter_properties=None, group_filter_properties=None,
filter_properties_list=None): filter_properties_list=None):
"""Finds hosts that supports the group. """Finds backends that supports the group.
Returns a list of hosts that meet the required specs, Returns a list of backends that meet the required specs,
ordered by their fitness. ordered by their fitness.
""" """
elevated = context.elevated() elevated = context.elevated()
hosts_by_group_type = self._get_weighted_candidates_by_group_type( backends_by_group_type = self._get_weighted_candidates_by_group_type(
context, group_spec, group_filter_properties) context, group_spec, group_filter_properties)
weighed_hosts = [] weighed_backends = []
hosts_by_vol_type = [] backends_by_vol_type = []
index = 0 index = 0
for request_spec in request_spec_list: for request_spec in request_spec_list:
volume_properties = request_spec['volume_properties'] volume_properties = request_spec['volume_properties']
@ -486,72 +492,72 @@ class FilterScheduler(driver.Scheduler):
self.populate_filter_properties(request_spec, self.populate_filter_properties(request_spec,
filter_properties) filter_properties)
# Find our local list of acceptable hosts by filtering and # Find our local list of acceptable backends by filtering and
# weighing our options. we virtually consume resources on # weighing our options. we virtually consume resources on
# it so subsequent selections can adjust accordingly. # it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only # Note: remember, we are using an iterator here. So only
# traverse this list once. # traverse this list once.
all_hosts = self.host_manager.get_all_host_states(elevated) all_backends = self.host_manager.get_all_backend_states(elevated)
if not all_hosts: if not all_backends:
return [] return []
# Filter local hosts based on requirements ... # Filter local backends based on requirements ...
hosts = self.host_manager.get_filtered_hosts(all_hosts, backends = self.host_manager.get_filtered_backends(
filter_properties) all_backends, filter_properties)
if not hosts: if not backends:
return [] return []
LOG.debug("Filtered %s", hosts) LOG.debug("Filtered %s", backends)
# weighted_host = WeightedHost() ... the best # weighted_backend = WeightedHost() ... the best
# host for the job. # backend for the job.
temp_weighed_hosts = self.host_manager.get_weighed_hosts( temp_weighed_backends = self.host_manager.get_weighed_backends(
hosts, backends,
filter_properties) filter_properties)
if not temp_weighed_hosts: if not temp_weighed_backends:
return [] return []
if index == 0: if index == 0:
hosts_by_vol_type = temp_weighed_hosts backends_by_vol_type = temp_weighed_backends
else: else:
hosts_by_vol_type = self._find_valid_hosts( backends_by_vol_type = self._find_valid_backends(
hosts_by_vol_type, temp_weighed_hosts) backends_by_vol_type, temp_weighed_backends)
if not hosts_by_vol_type: if not backends_by_vol_type:
return [] return []
index += 1 index += 1
# Find hosts selected by both the group type and volume types. # Find backends selected by both the group type and volume types.
weighed_hosts = self._find_valid_hosts(hosts_by_vol_type, weighed_backends = self._find_valid_backends(backends_by_vol_type,
hosts_by_group_type) backends_by_group_type)
return weighed_hosts return weighed_backends
def _find_valid_hosts(self, host_list1, host_list2): def _find_valid_backends(self, backend_list1, backend_list2):
new_hosts = [] new_backends = []
for host1 in host_list1: for backend1 in backend_list1:
for host2 in host_list2: for backend2 in backend_list2:
# Should schedule creation of group on backend level, # Should schedule creation of group on backend level,
# not pool level. # not pool level.
if (utils.extract_host(host1.obj.backend_id) == if (utils.extract_host(backend1.obj.backend_id) ==
utils.extract_host(host2.obj.backend_id)): utils.extract_host(backend2.obj.backend_id)):
new_hosts.append(host1) new_backends.append(backend1)
if not new_hosts: if not new_backends:
return [] return []
return new_hosts return new_backends
def _get_weighted_candidates_by_group_type( def _get_weighted_candidates_by_group_type(
self, context, group_spec, self, context, group_spec,
group_filter_properties=None): group_filter_properties=None):
"""Finds hosts that supports the group type. """Finds backends that supports the group type.
Returns a list of hosts that meet the required specs, Returns a list of backends that meet the required specs,
ordered by their fitness. ordered by their fitness.
""" """
elevated = context.elevated() elevated = context.elevated()
weighed_hosts = [] weighed_backends = []
volume_properties = group_spec['volume_properties'] volume_properties = group_spec['volume_properties']
# Since Cinder is using mixed filters from Oslo and it's own, which # Since Cinder is using mixed filters from Oslo and it's own, which
# takes 'resource_XX' and 'volume_XX' as input respectively, # takes 'resource_XX' and 'volume_XX' as input respectively,
@ -577,97 +583,97 @@ class FilterScheduler(driver.Scheduler):
self.populate_filter_properties(group_spec, self.populate_filter_properties(group_spec,
group_filter_properties) group_filter_properties)
# Find our local list of acceptable hosts by filtering and # Find our local list of acceptable backends by filtering and
# weighing our options. we virtually consume resources on # weighing our options. we virtually consume resources on
# it so subsequent selections can adjust accordingly. # it so subsequent selections can adjust accordingly.
# Note: remember, we are using an iterator here. So only # Note: remember, we are using an iterator here. So only
# traverse this list once. # traverse this list once.
all_hosts = self.host_manager.get_all_host_states(elevated) all_backends = self.host_manager.get_all_backend_states(elevated)
if not all_hosts: if not all_backends:
return [] return []
# Filter local hosts based on requirements ... # Filter local backends based on requirements ...
hosts = self.host_manager.get_filtered_hosts(all_hosts, backends = self.host_manager.get_filtered_backends(
group_filter_properties) all_backends, group_filter_properties)
if not hosts: if not backends:
return [] return []
LOG.debug("Filtered %s", hosts) LOG.debug("Filtered %s", backends)
# weighted_host = WeightedHost() ... the best # weighted_backends = WeightedHost() ... the best backend for the job.
# host for the job. weighed_backends = self.host_manager.get_weighed_backends(
weighed_hosts = self.host_manager.get_weighed_hosts( backends,
hosts,
group_filter_properties) group_filter_properties)
if not weighed_hosts: if not weighed_backends:
return [] return []
return weighed_hosts return weighed_backends
def _schedule(self, context, request_spec, filter_properties=None): def _schedule(self, context, request_spec, filter_properties=None):
weighed_hosts = self._get_weighted_candidates(context, request_spec, weighed_backends = self._get_weighted_candidates(context, request_spec,
filter_properties) filter_properties)
# When we get the weighed_hosts, we clear those hosts whose backend # When we get the weighed_backends, we clear those backends that don't
# is not same as consistencygroup's backend. # match the consistencygroup's backend.
if request_spec.get('CG_backend'): if request_spec.get('CG_backend'):
group_backend = request_spec.get('CG_backend') group_backend = request_spec.get('CG_backend')
else: else:
group_backend = request_spec.get('group_backend') group_backend = request_spec.get('group_backend')
if weighed_hosts and group_backend: if weighed_backends and group_backend:
# Get host name including host@backend#pool info from # Get host name including host@backend#pool info from
# weighed_hosts. # weighed_backends.
for host in weighed_hosts[::-1]: for backend in weighed_backends[::-1]:
backend = utils.extract_host(host.obj.backend_id) backend_id = utils.extract_host(backend.obj.backend_id)
if backend != group_backend: if backend_id != group_backend:
weighed_hosts.remove(host) weighed_backends.remove(backend)
if not weighed_hosts: if not weighed_backends:
LOG.warning(_LW('No weighed hosts found for volume ' LOG.warning(_LW('No weighed backend found for volume '
'with properties: %s'), 'with properties: %s'),
filter_properties['request_spec'].get('volume_type')) filter_properties['request_spec'].get('volume_type'))
return None return None
return self._choose_top_host(weighed_hosts, request_spec) return self._choose_top_backend(weighed_backends, request_spec)
def _schedule_group(self, context, request_spec_list, def _schedule_group(self, context, request_spec_list,
filter_properties_list=None): filter_properties_list=None):
weighed_hosts = self._get_weighted_candidates_group( weighed_backends = self._get_weighted_candidates_group(
context, context,
request_spec_list, request_spec_list,
filter_properties_list) filter_properties_list)
if not weighed_hosts: if not weighed_backends:
return None return None
return self._choose_top_host_group(weighed_hosts, request_spec_list) return self._choose_top_backend_group(weighed_backends,
request_spec_list)
def _schedule_generic_group(self, context, group_spec, request_spec_list, def _schedule_generic_group(self, context, group_spec, request_spec_list,
group_filter_properties=None, group_filter_properties=None,
filter_properties_list=None): filter_properties_list=None):
weighed_hosts = self._get_weighted_candidates_generic_group( weighed_backends = self._get_weighted_candidates_generic_group(
context, context,
group_spec, group_spec,
request_spec_list, request_spec_list,
group_filter_properties, group_filter_properties,
filter_properties_list) filter_properties_list)
if not weighed_hosts: if not weighed_backends:
return None return None
return self._choose_top_host_generic_group(weighed_hosts) return self._choose_top_backend_generic_group(weighed_backends)
def _choose_top_host(self, weighed_hosts, request_spec): def _choose_top_backend(self, weighed_backends, request_spec):
top_host = weighed_hosts[0] top_backend = weighed_backends[0]
host_state = top_host.obj backend_state = top_backend.obj
LOG.debug("Choosing %s", host_state.backend_id) LOG.debug("Choosing %s", backend_state.backend_id)
volume_properties = request_spec['volume_properties'] volume_properties = request_spec['volume_properties']
host_state.consume_from_volume(volume_properties) backend_state.consume_from_volume(volume_properties)
return top_host return top_backend
def _choose_top_host_group(self, weighed_hosts, request_spec_list): def _choose_top_backend_group(self, weighed_backends, request_spec_list):
top_host = weighed_hosts[0] top_backend = weighed_backends[0]
host_state = top_host.obj backend_state = top_backend.obj
LOG.debug("Choosing %s", host_state.backend_id) LOG.debug("Choosing %s", backend_state.backend_id)
return top_host return top_backend
def _choose_top_host_generic_group(self, weighed_hosts): def _choose_top_backend_generic_group(self, weighed_backends):
top_host = weighed_hosts[0] top_backend = weighed_backends[0]
host_state = top_host.obj backend_state = top_backend.obj
LOG.debug("Choosing %s", host_state.backend_id) LOG.debug("Choosing %s", backend_state.backend_id)
return top_host return top_backend

View File

@ -20,13 +20,15 @@ Scheduler host filters
from cinder.scheduler import base_filter from cinder.scheduler import base_filter
class BaseHostFilter(base_filter.BaseFilter): class BaseBackendFilter(base_filter.BaseFilter):
"""Base class for host filters.""" """Base class for host filters."""
def _filter_one(self, obj, filter_properties): def _filter_one(self, obj, filter_properties):
"""Return True if the object passes the filter, otherwise False.""" """Return True if the object passes the filter, otherwise False."""
return self.host_passes(obj, filter_properties) # For backward compatibility with out of tree filters
passes_method = getattr(self, 'host_passes', self.backend_passes)
return passes_method(obj, filter_properties)
def host_passes(self, host_state, filter_properties): def backend_passes(self, host_state, filter_properties):
"""Return True if the HostState passes the filter, otherwise False. """Return True if the HostState passes the filter, otherwise False.
Override this in a subclass. Override this in a subclass.
@ -34,6 +36,12 @@ class BaseHostFilter(base_filter.BaseFilter):
raise NotImplementedError() raise NotImplementedError()
class HostFilterHandler(base_filter.BaseFilterHandler): class BackendFilterHandler(base_filter.BaseFilterHandler):
def __init__(self, namespace): def __init__(self, namespace):
super(HostFilterHandler, self).__init__(BaseHostFilter, namespace) super(BackendFilterHandler, self).__init__(BaseHostFilter, namespace)
# NOTE(geguileo): For backward compatibility with external filters that
# inherit from these classes
BaseHostFilter = BaseBackendFilter
HostFilterHandler = BackendFilterHandler
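Both compatibility hooks in this file (the getattr() fallback in _filter_one and the BaseHostFilter/HostFilterHandler aliases) can be exercised with a toy out-of-tree filter. The sketch below uses toy classes and dict-based state, not the real cinder.scheduler.base_filter machinery; it shows a legacy filter that only defines host_passes still being dispatched correctly.

    # Toy reproduction of the compatibility hooks above.
    class BaseBackendFilter(object):
        def _filter_one(self, obj, filter_properties):
            # Prefer a legacy host_passes() if the subclass provides one,
            # otherwise use the new backend_passes().
            passes_method = getattr(self, 'host_passes', self.backend_passes)
            return passes_method(obj, filter_properties)

        def backend_passes(self, backend_state, filter_properties):
            raise NotImplementedError()

    # Alias kept so external filters inheriting the old name keep working.
    BaseHostFilter = BaseBackendFilter

    class LegacyZoneFilter(BaseHostFilter):
        # Out-of-tree filter written before the rename.
        def host_passes(self, host_state, filter_properties):
            return host_state.get('zone') == filter_properties.get('zone')

    f = LegacyZoneFilter()
    print(f._filter_one({'zone': 'az1'}, {'zone': 'az1'}))  # True
    print(f._filter_one({'zone': 'az2'}, {'zone': 'az1'}))  # False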

View File

@ -20,7 +20,7 @@ from cinder.scheduler import filters
from cinder.volume import api as volume from cinder.volume import api as volume
class AffinityFilter(filters.BaseHostFilter): class AffinityFilter(filters.BaseBackendFilter):
def __init__(self): def __init__(self):
self.volume_api = volume.API() self.volume_api = volume.API()
@ -36,7 +36,7 @@ class AffinityFilter(filters.BaseHostFilter):
class DifferentBackendFilter(AffinityFilter): class DifferentBackendFilter(AffinityFilter):
"""Schedule volume on a different back-end from a set of volumes.""" """Schedule volume on a different back-end from a set of volumes."""
def host_passes(self, host_state, filter_properties): def backend_passes(self, backend_state, filter_properties):
context = filter_properties['context'] context = filter_properties['context']
scheduler_hints = filter_properties.get('scheduler_hints') or {} scheduler_hints = filter_properties.get('scheduler_hints') or {}
@ -62,7 +62,7 @@ class DifferentBackendFilter(AffinityFilter):
if affinity_uuids: if affinity_uuids:
return not self._get_volumes(context, affinity_uuids, return not self._get_volumes(context, affinity_uuids,
host_state) backend_state)
# With no different_host key # With no different_host key
return True return True
@ -70,7 +70,7 @@ class DifferentBackendFilter(AffinityFilter):
class SameBackendFilter(AffinityFilter): class SameBackendFilter(AffinityFilter):
"""Schedule volume on the same back-end as another volume.""" """Schedule volume on the same back-end as another volume."""
def host_passes(self, host_state, filter_properties): def backend_passes(self, backend_state, filter_properties):
context = filter_properties['context'] context = filter_properties['context']
scheduler_hints = filter_properties.get('scheduler_hints') or {} scheduler_hints = filter_properties.get('scheduler_hints') or {}
@ -95,7 +95,7 @@ class SameBackendFilter(AffinityFilter):
return False return False
if affinity_uuids: if affinity_uuids:
return self._get_volumes(context, affinity_uuids, host_state) return self._get_volumes(context, affinity_uuids, backend_state)
# With no same_host key # With no same_host key
return True return True

View File

@ -16,17 +16,18 @@
from cinder.scheduler import filters from cinder.scheduler import filters
class AvailabilityZoneFilter(filters.BaseHostFilter): class AvailabilityZoneFilter(filters.BaseBackendFilter):
"""Filters Hosts by availability zone.""" """Filters Backends by availability zone."""
# Availability zones do not change within a request # Availability zones do not change within a request
run_filter_once_per_request = True run_filter_once_per_request = True
def host_passes(self, host_state, filter_properties): def backend_passes(self, backend_state, filter_properties):
spec = filter_properties.get('request_spec', {}) spec = filter_properties.get('request_spec', {})
props = spec.get('resource_properties', {}) props = spec.get('resource_properties', {})
availability_zone = props.get('availability_zone') availability_zone = props.get('availability_zone')
if availability_zone: if availability_zone:
return availability_zone == host_state.service['availability_zone'] return (availability_zone ==
backend_state.service['availability_zone'])
return True return True

View File

@ -21,8 +21,8 @@ from cinder.scheduler.filters import extra_specs_ops
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class CapabilitiesFilter(filters.BaseHostFilter): class CapabilitiesFilter(filters.BaseBackendFilter):
"""HostFilter to work with resource (instance & volume) type records.""" """BackendFilter to work with resource (instance & volume) type records."""
def _satisfies_extra_specs(self, capabilities, resource_type): def _satisfies_extra_specs(self, capabilities, resource_type):
"""Check if capabilities satisfy resource type requirements. """Check if capabilities satisfy resource type requirements.
@ -55,7 +55,7 @@ class CapabilitiesFilter(filters.BaseHostFilter):
try: try:
cap = cap[scope[index]] cap = cap[scope[index]]
except (TypeError, KeyError): except (TypeError, KeyError):
LOG.debug("Host doesn't provide capability '%(cap)s' " % LOG.debug("Backend doesn't provide capability '%(cap)s' " %
{'cap': scope[index]}) {'cap': scope[index]})
return False return False
@ -75,15 +75,15 @@ class CapabilitiesFilter(filters.BaseHostFilter):
return False return False
return True return True
def host_passes(self, host_state, filter_properties): def backend_passes(self, backend_state, filter_properties):
"""Return a list of hosts that can create resource_type.""" """Return a list of backends that can create resource_type."""
# Note(zhiteng) Currently only Cinder and Nova are using # Note(zhiteng) Currently only Cinder and Nova are using
# this filter, so the resource type is either instance or # this filter, so the resource type is either instance or
# volume. # volume.
resource_type = filter_properties.get('resource_type') resource_type = filter_properties.get('resource_type')
if not self._satisfies_extra_specs(host_state.capabilities, if not self._satisfies_extra_specs(backend_state.capabilities,
resource_type): resource_type):
LOG.debug("%(host_state)s fails resource_type extra_specs " LOG.debug("%(backend_state)s fails resource_type extra_specs "
"requirements", {'host_state': host_state}) "requirements", {'backend_state': backend_state})
return False return False
return True return True

View File

@ -28,22 +28,22 @@ from cinder.scheduler import filters
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class CapacityFilter(filters.BaseHostFilter): class CapacityFilter(filters.BaseBackendFilter):
"""CapacityFilter filters based on volume host's capacity utilization.""" """Capacity filters based on volume backend's capacity utilization."""
def host_passes(self, host_state, filter_properties): def backend_passes(self, backend_state, filter_properties):
"""Return True if host has sufficient capacity.""" """Return True if host has sufficient capacity."""
# If the volume already exists on this host, don't fail it for # If the volume already exists on this host, don't fail it for
# insufficient capacity (e.g., if we are retyping) # insufficient capacity (e.g., if we are retyping)
if host_state.backend_id == filter_properties.get('vol_exists_on'): if backend_state.backend_id == filter_properties.get('vol_exists_on'):
return True return True
spec = filter_properties.get('request_spec') spec = filter_properties.get('request_spec')
if spec: if spec:
volid = spec.get('volume_id') volid = spec.get('volume_id')
grouping = 'cluster' if host_state.cluster_name else 'host' grouping = 'cluster' if backend_state.cluster_name else 'host'
if filter_properties.get('new_size'): if filter_properties.get('new_size'):
# If new_size is passed, we are allocating space to extend a volume # If new_size is passed, we are allocating space to extend a volume
requested_size = (int(filter_properties.get('new_size')) - requested_size = (int(filter_properties.get('new_size')) -
@ -51,25 +51,25 @@ class CapacityFilter(filters.BaseHostFilter):
LOG.debug('Checking if %(grouping)s %(grouping_name)s can extend ' LOG.debug('Checking if %(grouping)s %(grouping_name)s can extend '
'the volume %(id)s in %(size)s GB', 'the volume %(id)s in %(size)s GB',
{'grouping': grouping, {'grouping': grouping,
'grouping_name': host_state.backend_id, 'id': volid, 'grouping_name': backend_state.backend_id, 'id': volid,
'size': requested_size}) 'size': requested_size})
else: else:
requested_size = filter_properties.get('size') requested_size = filter_properties.get('size')
LOG.debug('Checking if %(grouping)s %(grouping_name)s can create ' LOG.debug('Checking if %(grouping)s %(grouping_name)s can create '
'a %(size)s GB volume (%(id)s)', 'a %(size)s GB volume (%(id)s)',
{'grouping': grouping, {'grouping': grouping,
'grouping_name': host_state.backend_id, 'id': volid, 'grouping_name': backend_state.backend_id, 'id': volid,
'size': requested_size}) 'size': requested_size})
if host_state.free_capacity_gb is None: if backend_state.free_capacity_gb is None:
# Fail Safe # Fail Safe
LOG.error(_LE("Free capacity not set: " LOG.error(_LE("Free capacity not set: "
"volume node info collection broken.")) "volume node info collection broken."))
return False return False
free_space = host_state.free_capacity_gb free_space = backend_state.free_capacity_gb
total_space = host_state.total_capacity_gb total_space = backend_state.total_capacity_gb
reserved = float(host_state.reserved_percentage) / 100 reserved = float(backend_state.reserved_percentage) / 100
if free_space in ['infinite', 'unknown']: if free_space in ['infinite', 'unknown']:
# NOTE(zhiteng) for those back-ends cannot report actual # NOTE(zhiteng) for those back-ends cannot report actual
# available capacity, we assume it is able to serve the # available capacity, we assume it is able to serve the
@ -93,7 +93,7 @@ class CapacityFilter(filters.BaseHostFilter):
"%(grouping_name)s."), "%(grouping_name)s."),
{"total": total, {"total": total,
"grouping": grouping, "grouping": grouping,
"grouping_name": host_state.backend_id}) "grouping_name": backend_state.backend_id})
return False return False
# Calculate how much free space is left after taking into account # Calculate how much free space is left after taking into account
# the reserved space. # the reserved space.
@ -114,16 +114,16 @@ class CapacityFilter(filters.BaseHostFilter):
# thin_provisioning_support is True. Check if the ratio of # thin_provisioning_support is True. Check if the ratio of
# provisioned capacity over total capacity has exceeded over # provisioned capacity over total capacity has exceeded over
# subscription ratio. # subscription ratio.
if (thin and host_state.thin_provisioning_support and if (thin and backend_state.thin_provisioning_support and
host_state.max_over_subscription_ratio >= 1): backend_state.max_over_subscription_ratio >= 1):
provisioned_ratio = ((host_state.provisioned_capacity_gb + provisioned_ratio = ((backend_state.provisioned_capacity_gb +
requested_size) / total) requested_size) / total)
if provisioned_ratio > host_state.max_over_subscription_ratio: if provisioned_ratio > backend_state.max_over_subscription_ratio:
msg_args = { msg_args = {
"provisioned_ratio": provisioned_ratio, "provisioned_ratio": provisioned_ratio,
"oversub_ratio": host_state.max_over_subscription_ratio, "oversub_ratio": backend_state.max_over_subscription_ratio,
"grouping": grouping, "grouping": grouping,
"grouping_name": host_state.backend_id, "grouping_name": backend_state.backend_id,
} }
LOG.warning(_LW( LOG.warning(_LW(
"Insufficient free space for thin provisioning. " "Insufficient free space for thin provisioning. "
@ -140,20 +140,20 @@ class CapacityFilter(filters.BaseHostFilter):
# the currently available free capacity (taking into account # the currently available free capacity (taking into account
# of reserved space) which we can over-subscribe. # of reserved space) which we can over-subscribe.
adjusted_free_virtual = ( adjusted_free_virtual = (
free * host_state.max_over_subscription_ratio) free * backend_state.max_over_subscription_ratio)
return adjusted_free_virtual >= requested_size return adjusted_free_virtual >= requested_size
elif thin and host_state.thin_provisioning_support: elif thin and backend_state.thin_provisioning_support:
LOG.warning(_LW("Filtering out %(grouping)s %(grouping_name)s " LOG.warning(_LW("Filtering out %(grouping)s %(grouping_name)s "
"with an invalid maximum over subscription ratio " "with an invalid maximum over subscription ratio "
"of %(oversub_ratio).2f. The ratio should be a " "of %(oversub_ratio).2f. The ratio should be a "
"minimum of 1.0."), "minimum of 1.0."),
{"oversub_ratio": {"oversub_ratio":
host_state.max_over_subscription_ratio, backend_state.max_over_subscription_ratio,
"grouping": grouping, "grouping": grouping,
"grouping_name": host_state.backend_id}) "grouping_name": backend_state.backend_id})
return False return False
msg_args = {"grouping_name": host_state.backend_id, msg_args = {"grouping_name": backend_state.backend_id,
"grouping": grouping, "grouping": grouping,
"requested": requested_size, "requested": requested_size,
"available": free} "available": free}

View File

@ -24,33 +24,34 @@ from cinder.scheduler import filters
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class DriverFilter(filters.BaseHostFilter): class DriverFilter(filters.BaseBackendFilter):
"""DriverFilter filters hosts based on a 'filter function' and metrics. """DriverFilter filters backend based on a 'filter function' and metrics.
DriverFilter filters based on volume host's provided 'filter function' DriverFilter filters based on volume backend's provided 'filter function'
and metrics. and metrics.
""" """
def host_passes(self, host_state, filter_properties): def backend_passes(self, backend_state, filter_properties):
"""Determines whether a host has a passing filter_function or not.""" """Determines if a backend has a passing filter_function or not."""
stats = self._generate_stats(host_state, filter_properties) stats = self._generate_stats(backend_state, filter_properties)
LOG.debug("Checking backend '%s'", stats['host_stats']['backend_id']) LOG.debug("Checking backend '%s'",
stats['backend_stats']['backend_id'])
result = self._check_filter_function(stats) result = self._check_filter_function(stats)
LOG.debug("Result: %s", result) LOG.debug("Result: %s", result)
LOG.debug("Done checking backend '%s'", LOG.debug("Done checking backend '%s'",
stats['host_stats']['backend_id']) stats['backend_stats']['backend_id'])
return result return result
def _check_filter_function(self, stats): def _check_filter_function(self, stats):
"""Checks if a volume passes a host's filter function. """Checks if a volume passes a backend's filter function.
Returns a tuple in the format (filter_passing, filter_invalid). Returns a tuple in the format (filter_passing, filter_invalid).
Both values are booleans. Both values are booleans.
""" """
if stats['filter_function'] is None: if stats['filter_function'] is None:
LOG.debug("Filter function not set :: passing host") LOG.debug("Filter function not set :: passing backend")
return True return True
try: try:
@ -60,7 +61,7 @@ class DriverFilter(filters.BaseHostFilter):
# Warn the admin for now that there is an error in the # Warn the admin for now that there is an error in the
# filter function. # filter function.
LOG.warning(_LW("Error in filtering function " LOG.warning(_LW("Error in filtering function "
"'%(function)s' : '%(error)s' :: failing host"), "'%(function)s' : '%(error)s' :: failing backend"),
{'function': stats['filter_function'], {'function': stats['filter_function'],
'error': ex, }) 'error': ex, })
return False return False
@ -69,8 +70,8 @@ class DriverFilter(filters.BaseHostFilter):
def _run_evaluator(self, func, stats): def _run_evaluator(self, func, stats):
"""Evaluates a given function using the provided available stats.""" """Evaluates a given function using the provided available stats."""
host_stats = stats['host_stats'] backend_stats = stats['backend_stats']
host_caps = stats['host_caps'] backend_caps = stats['backend_caps']
extra_specs = stats['extra_specs'] extra_specs = stats['extra_specs']
qos_specs = stats['qos_specs'] qos_specs = stats['qos_specs']
volume_stats = stats['volume_stats'] volume_stats = stats['volume_stats']
@ -78,39 +79,39 @@ class DriverFilter(filters.BaseHostFilter):
result = evaluator.evaluate( result = evaluator.evaluate(
func, func,
extra=extra_specs, extra=extra_specs,
stats=host_stats, stats=backend_stats,
capabilities=host_caps, capabilities=backend_caps,
volume=volume_stats, volume=volume_stats,
qos=qos_specs) qos=qos_specs)
return result return result
def _generate_stats(self, host_state, filter_properties): def _generate_stats(self, backend_state, filter_properties):
"""Generates statistics from host and volume data.""" """Generates statistics from backend and volume data."""
host_stats = { backend_stats = {
'host': host_state.host, 'host': backend_state.host,
'cluster_name': host_state.cluster_name, 'cluster_name': backend_state.cluster_name,
'backend_id': host_state.backend_id, 'backend_id': backend_state.backend_id,
'volume_backend_name': host_state.volume_backend_name, 'volume_backend_name': backend_state.volume_backend_name,
'vendor_name': host_state.vendor_name, 'vendor_name': backend_state.vendor_name,
'driver_version': host_state.driver_version, 'driver_version': backend_state.driver_version,
'storage_protocol': host_state.storage_protocol, 'storage_protocol': backend_state.storage_protocol,
'QoS_support': host_state.QoS_support, 'QoS_support': backend_state.QoS_support,
'total_capacity_gb': host_state.total_capacity_gb, 'total_capacity_gb': backend_state.total_capacity_gb,
'allocated_capacity_gb': host_state.allocated_capacity_gb, 'allocated_capacity_gb': backend_state.allocated_capacity_gb,
'free_capacity_gb': host_state.free_capacity_gb, 'free_capacity_gb': backend_state.free_capacity_gb,
'reserved_percentage': host_state.reserved_percentage, 'reserved_percentage': backend_state.reserved_percentage,
'updated': host_state.updated, 'updated': backend_state.updated,
} }
host_caps = host_state.capabilities backend_caps = backend_state.capabilities
filter_function = None filter_function = None
if ('filter_function' in host_caps and if ('filter_function' in backend_caps and
host_caps['filter_function'] is not None): backend_caps['filter_function'] is not None):
filter_function = six.text_type(host_caps['filter_function']) filter_function = six.text_type(backend_caps['filter_function'])
qos_specs = filter_properties.get('qos_specs', {}) qos_specs = filter_properties.get('qos_specs', {})
@ -121,8 +122,8 @@ class DriverFilter(filters.BaseHostFilter):
volume_stats = request_spec.get('volume_properties', {}) volume_stats = request_spec.get('volume_properties', {})
stats = { stats = {
'host_stats': host_stats, 'backend_stats': backend_stats,
'host_caps': host_caps, 'backend_caps': backend_caps,
'extra_specs': extra_specs, 'extra_specs': extra_specs,
'qos_specs': qos_specs, 'qos_specs': qos_specs,
'volume_stats': volume_stats, 'volume_stats': volume_stats,

View File

@ -20,7 +20,7 @@ from cinder.scheduler import filters
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class IgnoreAttemptedHostsFilter(filters.BaseHostFilter): class IgnoreAttemptedHostsFilter(filters.BaseBackendFilter):
"""Filter out previously attempted hosts """Filter out previously attempted hosts
A host passes this filter if it has not already been attempted for A host passes this filter if it has not already been attempted for
@ -30,13 +30,13 @@ class IgnoreAttemptedHostsFilter(filters.BaseHostFilter):
{ {
'retry': { 'retry': {
'hosts': ['host1', 'host2'], 'backends': ['backend1', 'backend2'],
'num_attempts': 3, 'num_attempts': 3,
} }
} }
""" """
def host_passes(self, host_state, filter_properties): def backend_passes(self, backend_state, filter_properties):
"""Skip nodes that have already been attempted.""" """Skip nodes that have already been attempted."""
attempted = filter_properties.get('retry') attempted = filter_properties.get('retry')
if not attempted: if not attempted:
@ -44,14 +44,15 @@ class IgnoreAttemptedHostsFilter(filters.BaseHostFilter):
LOG.debug("Re-scheduling is disabled.") LOG.debug("Re-scheduling is disabled.")
return True return True
hosts = attempted.get('hosts', []) # TODO(geguileo): In P - Just use backends
host = host_state.backend_id backends = attempted.get('backends', attempted.get('hosts', []))
backend = backend_state.backend_id
passes = host not in hosts passes = backend not in backends
pass_msg = "passes" if passes else "fails" pass_msg = "passes" if passes else "fails"
LOG.debug("Host %(host)s %(pass_msg)s. Previously tried hosts: " LOG.debug("Backend %(backend)s %(pass_msg)s. Previously tried "
"%(hosts)s" % {'host': host, "backends: %(backends)s" % {'backend': backend,
'pass_msg': pass_msg, 'pass_msg': pass_msg,
'hosts': hosts}) 'backends': backends})
return passes return passes
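The retry bookkeeping written by the filter scheduler (see the 'backends'/'hosts' handling earlier in this change) is consumed here. A standalone sketch with toy data:

    # Toy version of the check above: read 'backends', fall back to the
    # legacy 'hosts' key, and reject anything already attempted.
    def backend_passes(backend_id, filter_properties):
        attempted = filter_properties.get('retry')
        if not attempted:
            return True  # re-scheduling disabled, nothing to skip
        backends = attempted.get('backends', attempted.get('hosts', []))
        return backend_id not in backends

    props = {'retry': {'num_attempts': 2, 'hosts': ['backend1']}}
    print(backend_passes('backend1', props))  # False, already tried
    print(backend_passes('backend2', props))  # True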

View File

@ -30,7 +30,7 @@ INSTANCE_HOST_PROP = 'OS-EXT-SRV-ATTR:host'
REQUESTS_TIMEOUT = 5 REQUESTS_TIMEOUT = 5
class InstanceLocalityFilter(filters.BaseHostFilter): class InstanceLocalityFilter(filters.BaseBackendFilter):
"""Schedule volume on the same host as a given instance. """Schedule volume on the same host as a given instance.
This filter enables selection of a storage back-end located on the host This filter enables selection of a storage back-end located on the host
@ -51,7 +51,7 @@ class InstanceLocalityFilter(filters.BaseHostFilter):
def __init__(self): def __init__(self):
# Cache Nova API answers directly into the Filter object. # Cache Nova API answers directly into the Filter object.
# Since a BaseHostFilter instance lives only during the volume's # Since a BaseBackendFilter instance lives only during the volume's
# scheduling, the cache is re-created for every new volume creation. # scheduling, the cache is re-created for every new volume creation.
self._cache = {} self._cache = {}
super(InstanceLocalityFilter, self).__init__() super(InstanceLocalityFilter, self).__init__()
@ -69,9 +69,9 @@ class InstanceLocalityFilter(filters.BaseHostFilter):
return self._nova_ext_srv_attr return self._nova_ext_srv_attr
def host_passes(self, backend_state, filter_properties): def backend_passes(self, backend_state, filter_properties):
context = filter_properties['context'] context = filter_properties['context']
host = volume_utils.extract_host(backend_state.backend_id, 'host') backend = volume_utils.extract_host(backend_state.backend_id, 'host')
scheduler_hints = filter_properties.get('scheduler_hints') or {} scheduler_hints = filter_properties.get('scheduler_hints') or {}
instance_uuid = scheduler_hints.get(HINT_KEYWORD, None) instance_uuid = scheduler_hints.get(HINT_KEYWORD, None)
@ -93,7 +93,7 @@ class InstanceLocalityFilter(filters.BaseHostFilter):
# First, lookup for already-known information in local cache # First, lookup for already-known information in local cache
if instance_uuid in self._cache: if instance_uuid in self._cache:
return self._cache[instance_uuid] == host return self._cache[instance_uuid] == backend
if not self._nova_has_extended_server_attributes(context): if not self._nova_has_extended_server_attributes(context):
LOG.warning(_LW('Hint "%s" dropped because ' LOG.warning(_LW('Hint "%s" dropped because '
@ -116,5 +116,5 @@ class InstanceLocalityFilter(filters.BaseHostFilter):
self._cache[instance_uuid] = getattr(server, INSTANCE_HOST_PROP) self._cache[instance_uuid] = getattr(server, INSTANCE_HOST_PROP)
# Match if given instance is hosted on host # Match if given instance is hosted on backend
return self._cache[instance_uuid] == host return self._cache[instance_uuid] == backend

View File

@ -21,8 +21,8 @@ import six
from cinder.scheduler import filters from cinder.scheduler import filters
class JsonFilter(filters.BaseHostFilter): class JsonFilter(filters.BaseBackendFilter):
"""Host Filter to allow simple JSON-based grammar for selecting hosts.""" """Backend filter for simple JSON-based grammar for selecting backends."""
def _op_compare(self, args, op): def _op_compare(self, args, op):
"""Compare first item of args with the rest using specified operator. """Compare first item of args with the rest using specified operator.
@ -87,12 +87,12 @@ class JsonFilter(filters.BaseHostFilter):
'and': _and, 'and': _and,
} }
def _parse_string(self, string, host_state): def _parse_string(self, string, backend_state):
"""Parse capability lookup strings. """Parse capability lookup strings.
Strings prefixed with $ are capability lookups in the Strings prefixed with $ are capability lookups in the
form '$variable' where 'variable' is an attribute in the form '$variable' where 'variable' is an attribute in the
HostState class. If $variable is a dictionary, you may BackendState class. If $variable is a dictionary, you may
use: $variable.dictkey use: $variable.dictkey
""" """
if not string: if not string:
@ -101,7 +101,7 @@ class JsonFilter(filters.BaseHostFilter):
return string return string
path = string[1:].split(".") path = string[1:].split(".")
obj = getattr(host_state, path[0], None) obj = getattr(backend_state, path[0], None)
if obj is None: if obj is None:
return None return None
for item in path[1:]: for item in path[1:]:
@ -110,7 +110,7 @@ class JsonFilter(filters.BaseHostFilter):
return None return None
return obj return obj
def _process_filter(self, query, host_state): def _process_filter(self, query, backend_state):
"""Recursively parse the query structure.""" """Recursively parse the query structure."""
if not query: if not query:
return True return True
@ -119,16 +119,16 @@ class JsonFilter(filters.BaseHostFilter):
cooked_args = [] cooked_args = []
for arg in query[1:]: for arg in query[1:]:
if isinstance(arg, list): if isinstance(arg, list):
arg = self._process_filter(arg, host_state) arg = self._process_filter(arg, backend_state)
elif isinstance(arg, six.string_types): elif isinstance(arg, six.string_types):
arg = self._parse_string(arg, host_state) arg = self._parse_string(arg, backend_state)
if arg is not None: if arg is not None:
cooked_args.append(arg) cooked_args.append(arg)
result = method(self, cooked_args) result = method(self, cooked_args)
return result return result
def host_passes(self, host_state, filter_properties): def backend_passes(self, backend_state, filter_properties):
"""Return a list of hosts that can fulfill query requirements.""" """Return a list of backends that can fulfill query requirements."""
# TODO(zhiteng) Add description for filter_properties structure # TODO(zhiteng) Add description for filter_properties structure
# and scheduler_hints. # and scheduler_hints.
try: try:
@ -141,9 +141,9 @@ class JsonFilter(filters.BaseHostFilter):
# NOTE(comstud): Not checking capabilities or service for # NOTE(comstud): Not checking capabilities or service for
# enabled/disabled so that a provided json filter can decide # enabled/disabled so that a provided json filter can decide
result = self._process_filter(jsonutils.loads(query), host_state) result = self._process_filter(jsonutils.loads(query), backend_state)
if isinstance(result, list): if isinstance(result, list):
# If any succeeded, include the host # If any succeeded, include the backend
result = any(result) result = any(result)
if result: if result:
# Filter it out. # Filter it out.
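For context on the grammar this filter evaluates, here is a minimal standalone sketch, assuming a toy FakeBackendState and a reduced operator table; it is illustrative only and is not the Cinder JsonFilter class:

import json
import operator


class FakeBackendState(object):
    """Illustrative stand-in for BackendState with two capabilities."""
    def __init__(self, free_capacity_gb, total_capacity_gb):
        self.free_capacity_gb = free_capacity_gb
        self.total_capacity_gb = total_capacity_gb


OPS = {'=': operator.eq, '<': operator.lt, '>': operator.gt,
       '<=': operator.le, '>=': operator.ge}


def parse_string(string, backend_state):
    # Strings prefixed with '$' are attribute lookups on the backend state;
    # '$variable.dictkey' walks into a dict-valued attribute.
    if not (isinstance(string, str) and string.startswith('$')):
        return string
    path = string[1:].split('.')
    obj = getattr(backend_state, path[0], None)
    for item in path[1:]:
        obj = obj.get(item) if isinstance(obj, dict) else None
    return obj


def process_filter(query, backend_state):
    # query is a list: [op, arg, arg, ...]; args may be nested sub-queries.
    op, raw_args = query[0], query[1:]
    args = []
    for arg in raw_args:
        if isinstance(arg, list):
            arg = process_filter(arg, backend_state)
        else:
            arg = parse_string(arg, backend_state)
        if arg is not None:
            args.append(arg)
    if op == 'and':
        return all(args)
    if op == 'or':
        return any(args)
    return OPS[op](*args)


query = json.dumps(['and',
                    ['>=', '$free_capacity_gb', 100],
                    ['>=', '$total_capacity_gb', 500]])
state = FakeBackendState(free_capacity_gb=200, total_capacity_gb=1024)
print(process_filter(json.loads(query), state))  # True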

View File

@ -124,11 +124,11 @@ class ScheduleCreateVolumeTask(flow_utils.CinderTask):
except Exception as e: except Exception as e:
# An error happened, notify on the scheduler queue and log that # An error happened, notify on the scheduler queue and log that
# this happened and set the volume to errored out and reraise the # this happened and set the volume to errored out and reraise the
# error *if* exception caught isn't NoValidHost. Otherwise *do not* # error *if* exception caught isn't NoValidBackend. Otherwise *do
# reraise (since what's the point?) # not* reraise (since what's the point?)
with excutils.save_and_reraise_exception( with excutils.save_and_reraise_exception(
reraise=not isinstance(e, exception.NoValidHost)): reraise=not isinstance(e, exception.NoValidBackend)):
if isinstance(e, exception.NoValidHost): if isinstance(e, exception.NoValidBackend):
self.message_api.create( self.message_api.create(
context, context,
defined_messages.UNABLE_TO_ALLOCATE, defined_messages.UNABLE_TO_ALLOCATE,
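The selective-reraise pattern above comes from oslo.utils; a hedged sketch of the same control flow, with NoValidBackend and the callbacks replaced by local stand-ins, looks like this:

from oslo_utils import excutils


class NoValidBackend(Exception):
    """Local stand-in for cinder.exception.NoValidBackend."""


def run_scheduling(schedule, set_volume_error, notify_unable_to_allocate):
    try:
        schedule()
    except Exception as e:
        # Reraise everything except NoValidBackend; in both cases the volume
        # is flagged as errored before the context manager exits.
        with excutils.save_and_reraise_exception(
                reraise=not isinstance(e, NoValidBackend)):
            if isinstance(e, NoValidBackend):
                notify_unable_to_allocate()
            set_volume_error()


def failing_schedule():
    raise NoValidBackend('no weighed backends available')


run_scheduling(failing_schedule,
               set_volume_error=lambda: print('volume -> error'),
               notify_unable_to_allocate=lambda: print('user message sent'))
# Prints both lines and returns without re-raising the exception.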

View File

@ -14,7 +14,7 @@
# under the License. # under the License.
""" """
Manage hosts in the current zone. Manage backends in the current zone.
""" """
import collections import collections
@ -34,6 +34,11 @@ from cinder.scheduler import filters
from cinder.volume import utils as vol_utils from cinder.volume import utils as vol_utils
# FIXME: This file should be renamed to backend_manager, we should also rename
# HostManager class, and scheduler_host_manager option, and also the weight
# classes, and add code to maintain backward compatibility.
host_manager_opts = [ host_manager_opts = [
cfg.ListOpt('scheduler_default_filters', cfg.ListOpt('scheduler_default_filters',
default=[ default=[
@ -83,7 +88,7 @@ class ReadOnlyDict(collections.Mapping):
return '%s(%r)' % (self.__class__.__name__, self.data) return '%s(%r)' % (self.__class__.__name__, self.data)
class HostState(object): class BackendState(object):
"""Mutable and immutable information tracked for a volume backend.""" """Mutable and immutable information tracked for a volume backend."""
def __init__(self, host, cluster_name, capabilities=None, service=None): def __init__(self, host, cluster_name, capabilities=None, service=None):
@ -303,12 +308,11 @@ class HostState(object):
# come up with better representation of HostState. # come up with better representation of HostState.
grouping = 'cluster' if self.cluster_name else 'host' grouping = 'cluster' if self.cluster_name else 'host'
grouping_name = self.backend_id grouping_name = self.backend_id
return ("%s '%s': free_capacity_gb: %s, pools: %s" % return ("%s '%s': free_capacity_gb: %s, pools: %s" %
(grouping, grouping_name, self.free_capacity_gb, self.pools)) (grouping, grouping_name, self.free_capacity_gb, self.pools))
class PoolState(HostState): class PoolState(BackendState):
def __init__(self, host, cluster_name, capabilities, pool_name): def __init__(self, host, cluster_name, capabilities, pool_name):
new_host = vol_utils.append_host(host, pool_name) new_host = vol_utils.append_host(host, pool_name)
new_cluster = vol_utils.append_host(cluster_name, pool_name) new_cluster = vol_utils.append_host(cluster_name, pool_name)
@ -356,7 +360,7 @@ class PoolState(HostState):
class HostManager(object): class HostManager(object):
"""Base HostManager class.""" """Base HostManager class."""
host_state_cls = HostState backend_state_cls = BackendState
REQUIRED_KEYS = frozenset([ REQUIRED_KEYS = frozenset([
'pool_name', 'pool_name',
@ -371,20 +375,20 @@ class HostManager(object):
def __init__(self): def __init__(self):
self.service_states = {} # { <host>: {<service>: {cap k : v}}} self.service_states = {} # { <host>: {<service>: {cap k : v}}}
self.host_state_map = {} self.backend_state_map = {}
self.filter_handler = filters.HostFilterHandler('cinder.scheduler.' self.filter_handler = filters.BackendFilterHandler('cinder.scheduler.'
'filters') 'filters')
self.filter_classes = self.filter_handler.get_all_classes() self.filter_classes = self.filter_handler.get_all_classes()
self.weight_handler = importutils.import_object( self.weight_handler = importutils.import_object(
CONF.scheduler_weight_handler, CONF.scheduler_weight_handler,
'cinder.scheduler.weights') 'cinder.scheduler.weights')
self.weight_classes = self.weight_handler.get_all_classes() self.weight_classes = self.weight_handler.get_all_classes()
self._no_capabilities_hosts = set() # Hosts having no capabilities self._no_capabilities_hosts = set() # Services without capabilities
self._update_host_state_map(cinder_context.get_admin_context()) self._update_backend_state_map(cinder_context.get_admin_context())
self.service_states_last_update = {} self.service_states_last_update = {}
def _choose_host_filters(self, filter_cls_names): def _choose_backend_filters(self, filter_cls_names):
"""Return a list of available filter names. """Return a list of available filter names.
This function checks input filter names against a predefined set This function checks input filter names against a predefined set
@ -411,7 +415,7 @@ class HostManager(object):
filter_name=", ".join(bad_filters)) filter_name=", ".join(bad_filters))
return good_filters return good_filters
def _choose_host_weighers(self, weight_cls_names): def _choose_backend_weighers(self, weight_cls_names):
"""Return a list of available weigher names. """Return a list of available weigher names.
This function checks input weigher names against a predefined set This function checks input weigher names against a predefined set
@ -439,20 +443,20 @@ class HostManager(object):
weigher_name=", ".join(bad_weighers)) weigher_name=", ".join(bad_weighers))
return good_weighers return good_weighers
def get_filtered_hosts(self, hosts, filter_properties, def get_filtered_backends(self, backends, filter_properties,
filter_class_names=None): filter_class_names=None):
"""Filter hosts and return only ones passing all filters.""" """Filter backends and return only ones passing all filters."""
filter_classes = self._choose_host_filters(filter_class_names) filter_classes = self._choose_backend_filters(filter_class_names)
return self.filter_handler.get_filtered_objects(filter_classes, return self.filter_handler.get_filtered_objects(filter_classes,
hosts, backends,
filter_properties) filter_properties)
def get_weighed_hosts(self, hosts, weight_properties, def get_weighed_backends(self, backends, weight_properties,
weigher_class_names=None): weigher_class_names=None):
"""Weigh the hosts.""" """Weigh the backends."""
weigher_classes = self._choose_host_weighers(weigher_class_names) weigher_classes = self._choose_backend_weighers(weigher_class_names)
return self.weight_handler.get_weighed_objects(weigher_classes, return self.weight_handler.get_weighed_objects(weigher_classes,
hosts, backends,
weight_properties) weight_properties)
def update_service_capabilities(self, service_name, host, capabilities, def update_service_capabilities(self, service_name, host, capabilities,
@ -532,7 +536,7 @@ class HostManager(object):
def has_all_capabilities(self): def has_all_capabilities(self):
return len(self._no_capabilities_hosts) == 0 return len(self._no_capabilities_hosts) == 0
def _update_host_state_map(self, context): def _update_backend_state_map(self, context):
# Get resource usage across the available volume nodes: # Get resource usage across the available volume nodes:
topic = constants.VOLUME_TOPIC topic = constants.VOLUME_TOPIC
@ -555,14 +559,14 @@ class HostManager(object):
# Since the service could have been added or remove from a cluster # Since the service could have been added or remove from a cluster
backend_key = service.service_topic_queue backend_key = service.service_topic_queue
backend_state = self.host_state_map.get(backend_key, None) backend_state = self.backend_state_map.get(backend_key, None)
if not backend_state: if not backend_state:
backend_state = self.host_state_cls( backend_state = self.backend_state_cls(
host, host,
service.cluster_name, service.cluster_name,
capabilities=capabilities, capabilities=capabilities,
service=dict(service)) service=dict(service))
self.host_state_map[backend_key] = backend_state self.backend_state_map[backend_key] = backend_state
# We may be receiving capability reports out of order from # We may be receiving capability reports out of order from
# different volume services in a cluster, so we drop older updates # different volume services in a cluster, so we drop older updates
@ -577,8 +581,8 @@ class HostManager(object):
self._no_capabilities_hosts = no_capabilities_hosts self._no_capabilities_hosts = no_capabilities_hosts
# remove non-active keys from host_state_map # remove non-active keys from backend_state_map
inactive_backend_keys = set(self.host_state_map) - active_backends inactive_backend_keys = set(self.backend_state_map) - active_backends
for backend_key in inactive_backend_keys: for backend_key in inactive_backend_keys:
# NOTE(geguileo): We don't want to log the removal of a host from # NOTE(geguileo): We don't want to log the removal of a host from
# the map when we are removing it because it has been added to a # the map when we are removing it because it has been added to a
@ -586,27 +590,28 @@ class HostManager(object):
if backend_key not in active_hosts: if backend_key not in active_hosts:
LOG.info(_LI("Removing non-active backend: %(backend)s from " LOG.info(_LI("Removing non-active backend: %(backend)s from "
"scheduler cache."), {'backend': backend_key}) "scheduler cache."), {'backend': backend_key})
del self.host_state_map[backend_key] del self.backend_state_map[backend_key]
def get_all_host_states(self, context): def get_all_backend_states(self, context):
"""Returns a dict of all the hosts the HostManager knows about. """Returns a dict of all the backends the HostManager knows about.
Each of the consumable resources in HostState are Each of the consumable resources in BackendState are
populated with capabilities scheduler received from RPC. populated with capabilities scheduler received from RPC.
For example: For example:
{'192.168.1.100': HostState(), ...} {'192.168.1.100': BackendState(), ...}
""" """
self._update_host_state_map(context) self._update_backend_state_map(context)
# build a pool_state map and return that map instead of host_state_map # build a pool_state map and return that map instead of
# backend_state_map
all_pools = {} all_pools = {}
for host, state in self.host_state_map.items(): for backend_key, state in self.backend_state_map.items():
for key in state.pools: for key in state.pools:
pool = state.pools[key] pool = state.pools[key]
# use host.pool_name to make sure key is unique # use backend_key.pool_name to make sure key is unique
pool_key = '.'.join([host, pool.pool_name]) pool_key = '.'.join([backend_key, pool.pool_name])
all_pools[pool_key] = pool all_pools[pool_key] = pool
return all_pools.values() return all_pools.values()
@ -614,14 +619,14 @@ class HostManager(object):
def get_pools(self, context): def get_pools(self, context):
"""Returns a dict of all pools on all hosts HostManager knows about.""" """Returns a dict of all pools on all hosts HostManager knows about."""
self._update_host_state_map(context) self._update_backend_state_map(context)
all_pools = [] all_pools = []
for host, state in self.host_state_map.items(): for backend_key, state in self.backend_state_map.items():
for key in state.pools: for key in state.pools:
pool = state.pools[key] pool = state.pools[key]
# use host.pool_name to make sure key is unique # use backend_key.pool_name to make sure key is unique
pool_key = vol_utils.append_host(host, pool.pool_name) pool_key = vol_utils.append_host(backend_key, pool.pool_name)
new_pool = dict(name=pool_key) new_pool = dict(name=pool_key)
new_pool.update(dict(capabilities=pool.capabilities)) new_pool.update(dict(capabilities=pool.capabilities))
all_pools.append(new_pool) all_pools.append(new_pool)
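As a rough illustration of the filter-then-weigh pipeline these renamed methods expose, here is a self-contained sketch; the filter, weigher, and backend dicts are invented for the example and are not the Cinder classes:

class SizeFilter(object):
    # Invented example filter: pass backends with enough free space.
    def backend_passes(self, backend_state, filter_properties):
        return backend_state['free_capacity_gb'] >= filter_properties['size']


class FreeSpaceWeigher(object):
    # Invented example weigher: more free space means a higher weight.
    def weigh(self, backend_state):
        return backend_state['free_capacity_gb']


def get_filtered_backends(backends, filter_properties, filters):
    return [b for b in backends
            if all(f.backend_passes(b, filter_properties) for f in filters)]


def get_weighed_backends(backends, weighers):
    return sorted(backends,
                  key=lambda b: sum(w.weigh(b) for w in weighers),
                  reverse=True)


backends = [{'name': 'host1@lvm#pool0', 'free_capacity_gb': 50},
            {'name': 'host2@lvm#pool0', 'free_capacity_gb': 500}]
passing = get_filtered_backends(backends, {'size': 100}, [SizeFilter()])
ranked = get_weighed_backends(passing, [FreeSpaceWeigher()])
print([b['name'] for b in ranked])  # ['host2@lvm#pool0']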

View File

@ -124,8 +124,8 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
context, group, context, group,
request_spec_list, request_spec_list,
filter_properties_list) filter_properties_list)
except exception.NoValidHost: except exception.NoValidBackend:
LOG.error(_LE("Could not find a host for consistency group " LOG.error(_LE("Could not find a backend for consistency group "
"%(group_id)s."), "%(group_id)s."),
{'group_id': group.id}) {'group_id': group.id})
group.status = 'error' group.status = 'error'
@ -149,8 +149,8 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
request_spec_list, request_spec_list,
group_filter_properties, group_filter_properties,
filter_properties_list) filter_properties_list)
except exception.NoValidHost: except exception.NoValidBackend:
LOG.error(_LE("Could not find a host for group " LOG.error(_LE("Could not find a backend for group "
"%(group_id)s."), "%(group_id)s."),
{'group_id': group.id}) {'group_id': group.id})
group.status = 'error' group.status = 'error'
@ -198,7 +198,7 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
def migrate_volume(self, context, volume, backend, force_copy, def migrate_volume(self, context, volume, backend, force_copy,
request_spec, filter_properties): request_spec, filter_properties):
"""Ensure that the host exists and can accept the volume.""" """Ensure that the backend exists and can accept the volume."""
self._wait_for_scheduler() self._wait_for_scheduler()
def _migrate_volume_set_error(self, context, ex, request_spec): def _migrate_volume_set_error(self, context, ex, request_spec):
@ -214,10 +214,10 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
context, ex, request_spec) context, ex, request_spec)
try: try:
tgt_backend = self.driver.host_passes_filters(context, backend, tgt_backend = self.driver.backend_passes_filters(context, backend,
request_spec, request_spec,
filter_properties) filter_properties)
except exception.NoValidHost as ex: except exception.NoValidBackend as ex:
_migrate_volume_set_error(self, context, ex, request_spec) _migrate_volume_set_error(self, context, ex, request_spec)
except Exception as ex: except Exception as ex:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
@ -269,19 +269,20 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
migration_policy = 'never' migration_policy = 'never'
try: try:
tgt_host = self.driver.find_retype_host(context, request_spec, tgt_backend = self.driver.find_retype_backend(context,
filter_properties, request_spec,
migration_policy) filter_properties,
migration_policy)
except Exception as ex: except Exception as ex:
# Not having a valid host is an expected exception, so we don't # Not having a valid host is an expected exception, so we don't
# reraise on it. # reraise on it.
reraise = not isinstance(ex, exception.NoValidHost) reraise = not isinstance(ex, exception.NoValidBackend)
with excutils.save_and_reraise_exception(reraise=reraise): with excutils.save_and_reraise_exception(reraise=reraise):
_retype_volume_set_error(self, context, ex, request_spec, _retype_volume_set_error(self, context, ex, request_spec,
volume, reservations) volume, reservations)
else: else:
volume_rpcapi.VolumeAPI().retype(context, volume, volume_rpcapi.VolumeAPI().retype(context, volume,
new_type['id'], tgt_host, new_type['id'], tgt_backend,
migration_policy, migration_policy,
reservations, reservations,
old_reservations) old_reservations)
@ -298,11 +299,11 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
context, ex, request_spec) context, ex, request_spec)
try: try:
self.driver.host_passes_filters(context, self.driver.backend_passes_filters(context,
volume.service_topic_queue, volume.service_topic_queue,
request_spec, request_spec,
filter_properties) filter_properties)
except exception.NoValidHost as ex: except exception.NoValidBackend as ex:
_manage_existing_set_error(self, context, ex, request_spec) _manage_existing_set_error(self, context, ex, request_spec)
except Exception as ex: except Exception as ex:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
@ -333,12 +334,12 @@ class SchedulerManager(manager.CleanableManager, manager.Manager):
filter_properties['new_size'] = new_size filter_properties['new_size'] = new_size
try: try:
self.driver.host_passes_filters(context, self.driver.backend_passes_filters(context,
volume.service_topic_queue, volume.service_topic_queue,
request_spec, filter_properties) request_spec, filter_properties)
volume_rpcapi.VolumeAPI().extend_volume(context, volume, new_size, volume_rpcapi.VolumeAPI().extend_volume(context, volume, new_size,
reservations) reservations)
except exception.NoValidHost as ex: except exception.NoValidBackend as ex:
QUOTAS.rollback(context, reservations, QUOTAS.rollback(context, reservations,
project_id=volume.project_id) project_id=volume.project_id)
_extend_volume_set_error(self, context, ex, request_spec) _extend_volume_set_error(self, context, ex, request_spec)
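A hedged sketch of the extend-volume control flow shown above, with the driver, RPC API, quota manager, and error callback passed in as stand-ins rather than the real Cinder objects:

class NoValidBackend(Exception):
    """Local stand-in for cinder.exception.NoValidBackend."""


def extend_volume(driver, rpcapi, quotas, set_error, context, volume,
                  new_size, reservations, request_spec, filter_properties):
    filter_properties['new_size'] = new_size
    try:
        # Ask the scheduler driver whether the volume's current backend can
        # also hold the extra space before telling the volume service to grow.
        driver.backend_passes_filters(context, volume['service_topic_queue'],
                                      request_spec, filter_properties)
        rpcapi.extend_volume(context, volume, new_size, reservations)
    except NoValidBackend as ex:
        # Not enough room anywhere: give back the reserved quota and record
        # the failure instead of leaving the reservation dangling.
        quotas.rollback(context, reservations,
                        project_id=volume['project_id'])
        set_error(context, ex, request_spec)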

View File

@ -413,11 +413,13 @@ class ModelsObjectComparatorMixin(object):
def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None, def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None,
msg=None): msg=None):
obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys) obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
sort_key = lambda d: [d[k] for k in sorted(d)] objs1 = map(obj_to_dict, objs1)
conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key) objs2 = list(map(obj_to_dict, objs2))
# We don't care about the order of the lists, as long as they are in
self.assertListEqual(conv_and_sort(objs1), conv_and_sort(objs2), for obj1 in objs1:
msg=msg) self.assertIn(obj1, objs2)
objs2.remove(obj1)
self.assertEqual([], objs2)
def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2): def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2):
self.assertEqual(len(primitives1), len(primitives2)) self.assertEqual(len(primitives1), len(primitives2))
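The rewritten _assertEqualListsOfObjects above compares the two lists order-insensitively by removing each expected item from a copy of the actual list; a standalone illustration of that idea:

def assert_same_items(expected, actual):
    # Remove each expected dict from a copy of the actual list and require
    # the copy to end up empty, so duplicates are counted as well.
    remaining = list(actual)
    for item in expected:
        assert item in remaining, '%r missing' % (item,)
        remaining.remove(item)
    assert remaining == [], 'unexpected extras: %r' % (remaining,)


assert_same_items([{'id': 1}, {'id': 2}], [{'id': 2}, {'id': 1}])  # passes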

View File

@ -62,6 +62,7 @@ class BaseAdminTest(test.TestCase):
def _create_volume(self, context, updates=None): def _create_volume(self, context, updates=None):
db_volume = {'status': 'available', db_volume = {'status': 'available',
'host': 'test', 'host': 'test',
'binary': 'cinder-volume',
'availability_zone': 'fake_zone', 'availability_zone': 'fake_zone',
'attach_status': fields.VolumeAttachStatus.DETACHED} 'attach_status': fields.VolumeAttachStatus.DETACHED}
if updates: if updates:
@ -502,10 +503,12 @@ class AdminActionsTest(BaseAdminTest):
db.service_create(self.ctx, db.service_create(self.ctx,
{'host': 'test', {'host': 'test',
'topic': constants.VOLUME_TOPIC, 'topic': constants.VOLUME_TOPIC,
'binary': 'cinder-volume',
'created_at': timeutils.utcnow()}) 'created_at': timeutils.utcnow()})
db.service_create(self.ctx, db.service_create(self.ctx,
{'host': 'test2', {'host': 'test2',
'topic': constants.VOLUME_TOPIC, 'topic': constants.VOLUME_TOPIC,
'binary': 'cinder-volume',
'created_at': timeutils.utcnow()}) 'created_at': timeutils.utcnow()})
db.service_create(self.ctx, db.service_create(self.ctx,
{'host': 'clustered_host', {'host': 'clustered_host',

View File

@ -36,6 +36,7 @@ def _get_filters_sentinel():
'race_preventer': mock.sentinel.race_preventer, 'race_preventer': mock.sentinel.race_preventer,
'last_heartbeat': mock.sentinel.last_heartbeat, 'last_heartbeat': mock.sentinel.last_heartbeat,
'num_hosts': mock.sentinel.num_hosts, 'num_hosts': mock.sentinel.num_hosts,
'name_match_level': mock.sentinel.name_match_level,
'num_down_hosts': mock.sentinel.num_down_hosts} 'num_down_hosts': mock.sentinel.num_down_hosts}

View File

@ -90,9 +90,9 @@ class FakeHostManager(host_manager.HostManager):
} }
class FakeHostState(host_manager.HostState): class FakeBackendState(host_manager.BackendState):
def __init__(self, host, attribute_dict): def __init__(self, host, attribute_dict):
super(FakeHostState, self).__init__(host, None) super(FakeBackendState, self).__init__(host, None)
for (key, val) in attribute_dict.items(): for (key, val) in attribute_dict.items():
setattr(self, key, val) setattr(self, key, val)

View File

@ -42,11 +42,11 @@ class AllocatedCapacityWeigherTestCase(test.TestCase):
weight_properties)[0] weight_properties)[0]
@mock.patch('cinder.db.sqlalchemy.api.service_get_all') @mock.patch('cinder.db.sqlalchemy.api.service_get_all')
def _get_all_hosts(self, _mock_service_get_all, disabled=False): def _get_all_backends(self, _mock_service_get_all, disabled=False):
ctxt = context.get_admin_context() ctxt = context.get_admin_context()
fakes.mock_host_manager_db_calls(_mock_service_get_all, fakes.mock_host_manager_db_calls(_mock_service_get_all,
disabled=disabled) disabled=disabled)
host_states = self.host_manager.get_all_host_states(ctxt) host_states = self.host_manager.get_all_backend_states(ctxt)
_mock_service_get_all.assert_called_once_with( _mock_service_get_all.assert_called_once_with(
ctxt, ctxt,
None, # backend_match_level None, # backend_match_level
@ -54,7 +54,7 @@ class AllocatedCapacityWeigherTestCase(test.TestCase):
return host_states return host_states
def test_default_of_spreading_first(self): def test_default_of_spreading_first(self):
hostinfo_list = self._get_all_hosts() hostinfo_list = self._get_all_backends()
# host1: allocated_capacity_gb=0, weight=0 Norm=0.0 # host1: allocated_capacity_gb=0, weight=0 Norm=0.0
# host2: allocated_capacity_gb=1748, weight=-1748 # host2: allocated_capacity_gb=1748, weight=-1748
@ -70,7 +70,7 @@ class AllocatedCapacityWeigherTestCase(test.TestCase):
def test_capacity_weight_multiplier1(self): def test_capacity_weight_multiplier1(self):
self.flags(allocated_capacity_weight_multiplier=1.0) self.flags(allocated_capacity_weight_multiplier=1.0)
hostinfo_list = self._get_all_hosts() hostinfo_list = self._get_all_backends()
# host1: allocated_capacity_gb=0, weight=0 Norm=0.0 # host1: allocated_capacity_gb=0, weight=0 Norm=0.0
# host2: allocated_capacity_gb=1748, weight=1748 # host2: allocated_capacity_gb=1748, weight=1748
@ -86,7 +86,7 @@ class AllocatedCapacityWeigherTestCase(test.TestCase):
def test_capacity_weight_multiplier2(self): def test_capacity_weight_multiplier2(self):
self.flags(allocated_capacity_weight_multiplier=-2.0) self.flags(allocated_capacity_weight_multiplier=-2.0)
hostinfo_list = self._get_all_hosts() hostinfo_list = self._get_all_backends()
# host1: allocated_capacity_gb=0, weight=0 Norm=0.0 # host1: allocated_capacity_gb=0, weight=0 Norm=0.0
# host2: allocated_capacity_gb=1748, weight=-3496 # host2: allocated_capacity_gb=1748, weight=-3496
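The weights in the comments above can be reproduced by hand, assuming raw allocated_capacity_gb values are min-max normalized to [0, 1] and then scaled by the configured multiplier (the highest final weight wins):

def normalize(values):
    lo, hi = min(values), max(values)
    return [0.0 if hi == lo else (v - lo) / (hi - lo) for v in values]


allocated = {'host1': 0, 'host2': 1748, 'host3': 256, 'host4': 1848}
for multiplier in (-1.0, 1.0, -2.0):
    norm = normalize(list(allocated.values()))
    # Adding 0.0 turns -0.0 into 0.0 so the printed weights read cleanly.
    weights = dict(zip(allocated, (multiplier * n + 0.0 for n in norm)))
    winner = max(weights, key=weights.get)
    print(multiplier, winner, weights[winner])
# -1.0 host1 0.0  -> spread to the least-allocated backend (the default)
#  1.0 host4 1.0  -> stack onto the most-allocated backend
# -2.0 host1 0.0  -> host1 is never penalized, host4 ends up at -2.0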

View File

@ -178,8 +178,8 @@ class TestBaseFilterHandler(test.TestCase):
def test_get_filtered_objects_info_and_debug_log_none_returned(self): def test_get_filtered_objects_info_and_debug_log_none_returned(self):
all_filters = [FilterA, FilterA, FilterB] all_filters = [FilterA, FilterA, FilterB]
fake_hosts = [host_manager.HostState('fake_host%s' % x, None) fake_backends = [host_manager.BackendState('fake_be%s' % x, None)
for x in range(1, 4)] for x in range(1, 4)]
filt_props = {"request_spec": {'volume_id': fake.VOLUME_ID, filt_props = {"request_spec": {'volume_id': fake.VOLUME_ID,
'volume_properties': {'project_id': fake.PROJECT_ID, 'volume_properties': {'project_id': fake.PROJECT_ID,
@ -187,7 +187,7 @@ class TestBaseFilterHandler(test.TestCase):
'host': 'host4'}}} 'host': 'host4'}}}
with mock.patch.object(base_filter, 'LOG') as mock_log: with mock.patch.object(base_filter, 'LOG') as mock_log:
result = self.handler.get_filtered_objects( result = self.handler.get_filtered_objects(
all_filters, fake_hosts, filt_props) all_filters, fake_backends, filt_props)
self.assertFalse(result) self.assertFalse(result)
msg = "with volume ID '%s'" % fake.VOLUME_ID msg = "with volume ID '%s'" % fake.VOLUME_ID
# FilterA should leave Host1 and Host2; FilterB should leave None. # FilterA should leave Host1 and Host2; FilterB should leave None.
@ -197,8 +197,8 @@ class TestBaseFilterHandler(test.TestCase):
self.assertIn(msg, cargs) self.assertIn(msg, cargs)
self.assertIn(exp_output, cargs) self.assertIn(exp_output, cargs)
exp_output = ("[('FilterA', ['fake_host2', 'fake_host3']), " exp_output = ("[('FilterA', ['fake_be2', 'fake_be3']), "
"('FilterA', ['fake_host3']), " "('FilterA', ['fake_be3']), "
+ "('FilterB', None)]") + "('FilterB', None)]")
cargs = mock_log.debug.call_args[0][0] cargs = mock_log.debug.call_args[0][0]
self.assertIn(msg, cargs) self.assertIn(msg, cargs)

View File

@ -45,16 +45,16 @@ class CapacityWeigherTestCase(test.TestCase):
weight_properties) weight_properties)
@mock.patch('cinder.db.sqlalchemy.api.service_get_all') @mock.patch('cinder.db.sqlalchemy.api.service_get_all')
def _get_all_hosts(self, _mock_service_get_all, disabled=False): def _get_all_backends(self, _mock_service_get_all, disabled=False):
ctxt = context.get_admin_context() ctxt = context.get_admin_context()
fakes.mock_host_manager_db_calls(_mock_service_get_all, fakes.mock_host_manager_db_calls(_mock_service_get_all,
disabled=disabled) disabled=disabled)
host_states = self.host_manager.get_all_host_states(ctxt) backend_states = self.host_manager.get_all_backend_states(ctxt)
_mock_service_get_all.assert_called_once_with( _mock_service_get_all.assert_called_once_with(
ctxt, ctxt,
None, # backend_match_level None, # backend_match_level
topic=constants.VOLUME_TOPIC, disabled=disabled) topic=constants.VOLUME_TOPIC, disabled=disabled)
return host_states return backend_states
# If thin and thin_provisioning_support are True, # If thin and thin_provisioning_support are True,
# use the following formula: # use the following formula:
@ -78,7 +78,7 @@ class CapacityWeigherTestCase(test.TestCase):
) )
@ddt.unpack @ddt.unpack
def test_default_of_spreading_first(self, volume_type, winner): def test_default_of_spreading_first(self, volume_type, winner):
hostinfo_list = self._get_all_hosts() backend_info_list = self._get_all_backends()
# Results for the 1st test # Results for the 1st test
# {'provisioning:type': 'thin'}: # {'provisioning:type': 'thin'}:
@ -106,7 +106,7 @@ class CapacityWeigherTestCase(test.TestCase):
'volume_type': volume_type, 'volume_type': volume_type,
} }
weighed_host = self._get_weighed_hosts( weighed_host = self._get_weighed_hosts(
hostinfo_list, backend_info_list,
weight_properties=weight_properties)[0] weight_properties=weight_properties)[0]
self.assertEqual(1.0, weighed_host.weight) self.assertEqual(1.0, weighed_host.weight)
self.assertEqual(winner, utils.extract_host(weighed_host.obj.host)) self.assertEqual(winner, utils.extract_host(weighed_host.obj.host))
@ -126,7 +126,7 @@ class CapacityWeigherTestCase(test.TestCase):
@ddt.unpack @ddt.unpack
def test_capacity_weight_multiplier1(self, volume_type, winner): def test_capacity_weight_multiplier1(self, volume_type, winner):
self.flags(capacity_weight_multiplier=-1.0) self.flags(capacity_weight_multiplier=-1.0)
hostinfo_list = self._get_all_hosts() backend_info_list = self._get_all_backends()
# Results for the 1st test # Results for the 1st test
# {'provisioning:type': 'thin'}: # {'provisioning:type': 'thin'}:
@ -154,7 +154,7 @@ class CapacityWeigherTestCase(test.TestCase):
'volume_type': volume_type, 'volume_type': volume_type,
} }
weighed_host = self._get_weighed_hosts( weighed_host = self._get_weighed_hosts(
hostinfo_list, backend_info_list,
weight_properties=weight_properties)[0] weight_properties=weight_properties)[0]
self.assertEqual(0.0, weighed_host.weight) self.assertEqual(0.0, weighed_host.weight)
self.assertEqual(winner, utils.extract_host(weighed_host.obj.host)) self.assertEqual(winner, utils.extract_host(weighed_host.obj.host))
@ -174,7 +174,7 @@ class CapacityWeigherTestCase(test.TestCase):
@ddt.unpack @ddt.unpack
def test_capacity_weight_multiplier2(self, volume_type, winner): def test_capacity_weight_multiplier2(self, volume_type, winner):
self.flags(capacity_weight_multiplier=2.0) self.flags(capacity_weight_multiplier=2.0)
hostinfo_list = self._get_all_hosts() backend_info_list = self._get_all_backends()
# Results for the 1st test # Results for the 1st test
# {'provisioning:type': 'thin'}: # {'provisioning:type': 'thin'}:
@ -202,7 +202,7 @@ class CapacityWeigherTestCase(test.TestCase):
'volume_type': volume_type, 'volume_type': volume_type,
} }
weighed_host = self._get_weighed_hosts( weighed_host = self._get_weighed_hosts(
hostinfo_list, backend_info_list,
weight_properties=weight_properties)[0] weight_properties=weight_properties)[0]
self.assertEqual(1.0 * 2, weighed_host.weight) self.assertEqual(1.0 * 2, weighed_host.weight)
self.assertEqual(winner, utils.extract_host(weighed_host.obj.host)) self.assertEqual(winner, utils.extract_host(weighed_host.obj.host))
@ -210,7 +210,7 @@ class CapacityWeigherTestCase(test.TestCase):
def test_capacity_weight_no_unknown_or_infinite(self): def test_capacity_weight_no_unknown_or_infinite(self):
self.flags(capacity_weight_multiplier=-1.0) self.flags(capacity_weight_multiplier=-1.0)
del self.host_manager.service_states['host5'] del self.host_manager.service_states['host5']
hostinfo_list = self._get_all_hosts() backend_info_list = self._get_all_backends()
# host1: thin_provisioning_support = False # host1: thin_provisioning_support = False
# free_capacity_gb=1024, # free_capacity_gb=1024,
@ -229,7 +229,7 @@ class CapacityWeigherTestCase(test.TestCase):
# Norm=0.0 # Norm=0.0
# so, host4 should win: # so, host4 should win:
weighed_hosts = self._get_weighed_hosts(hostinfo_list) weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0] best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight) self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4', utils.extract_host(best_host.obj.host)) self.assertEqual('host4', utils.extract_host(best_host.obj.host))
@ -250,7 +250,7 @@ class CapacityWeigherTestCase(test.TestCase):
'thick_provisioning_support': False, 'thick_provisioning_support': False,
'reserved_percentage': 5, 'reserved_percentage': 5,
'timestamp': datetime.utcnow()} 'timestamp': datetime.utcnow()}
hostinfo_list = self._get_all_hosts() backend_info_list = self._get_all_backends()
# host1: thin_provisioning_support = False # host1: thin_provisioning_support = False
# free_capacity_gb=1024, # free_capacity_gb=1024,
@ -271,7 +271,7 @@ class CapacityWeigherTestCase(test.TestCase):
# Norm=-1.0 # Norm=-1.0
# so, host4 should win: # so, host4 should win:
weighed_hosts = self._get_weighed_hosts(hostinfo_list) weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0] best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight) self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4', utils.extract_host(best_host.obj.host)) self.assertEqual('host4', utils.extract_host(best_host.obj.host))
@ -292,7 +292,7 @@ class CapacityWeigherTestCase(test.TestCase):
'thick_provisioning_support': False, 'thick_provisioning_support': False,
'reserved_percentage': 5, 'reserved_percentage': 5,
'timestamp': datetime.utcnow()} 'timestamp': datetime.utcnow()}
hostinfo_list = self._get_all_hosts() backend_info_list = self._get_all_backends()
# host1: thin_provisioning_support = False # host1: thin_provisioning_support = False
# free_capacity_gb=1024, # free_capacity_gb=1024,
@ -313,7 +313,7 @@ class CapacityWeigherTestCase(test.TestCase):
# Norm=-1.0 # Norm=-1.0
# so, host4 should win: # so, host4 should win:
weighed_hosts = self._get_weighed_hosts(hostinfo_list) weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0] best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight) self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4', utils.extract_host(best_host.obj.host)) self.assertEqual('host4', utils.extract_host(best_host.obj.host))
@ -334,7 +334,7 @@ class CapacityWeigherTestCase(test.TestCase):
'thick_provisioning_support': False, 'thick_provisioning_support': False,
'reserved_percentage': 5, 'reserved_percentage': 5,
'timestamp': datetime.utcnow()} 'timestamp': datetime.utcnow()}
hostinfo_list = self._get_all_hosts() backend_info_list = self._get_all_backends()
# host1: thin_provisioning_support = False # host1: thin_provisioning_support = False
# free_capacity_gb=1024, # free_capacity_gb=1024,
@ -355,7 +355,7 @@ class CapacityWeigherTestCase(test.TestCase):
# Norm=-1.0 # Norm=-1.0
# so, host4 should win: # so, host4 should win:
weighed_hosts = self._get_weighed_hosts(hostinfo_list) weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0] best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight) self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4', utils.extract_host(best_host.obj.host)) self.assertEqual('host4', utils.extract_host(best_host.obj.host))
@ -376,7 +376,7 @@ class CapacityWeigherTestCase(test.TestCase):
'thick_provisioning_support': False, 'thick_provisioning_support': False,
'reserved_percentage': 5, 'reserved_percentage': 5,
'timestamp': datetime.utcnow()} 'timestamp': datetime.utcnow()}
hostinfo_list = self._get_all_hosts() backend_info_list = self._get_all_backends()
# host1: thin_provisioning_support = False # host1: thin_provisioning_support = False
# free_capacity_gb=1024, # free_capacity_gb=1024,
@ -397,7 +397,7 @@ class CapacityWeigherTestCase(test.TestCase):
# Norm=-1.0 # Norm=-1.0
# so, host4 should win: # so, host4 should win:
weighed_hosts = self._get_weighed_hosts(hostinfo_list) weighed_hosts = self._get_weighed_hosts(backend_info_list)
best_host = weighed_hosts[0] best_host = weighed_hosts[0]
self.assertEqual(0.0, best_host.weight) self.assertEqual(0.0, best_host.weight)
self.assertEqual('host4', utils.extract_host(best_host.obj.host)) self.assertEqual('host4', utils.extract_host(best_host.obj.host))
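The "thin provisioning" comments in these tests refer to weighing against a virtual free capacity; the exact formula lives in Cinder's capacity helpers, so the following is only an illustrative approximation under stated assumptions:

import math


def virtual_free_capacity(total_gb, free_gb, provisioned_gb,
                          reserved_percentage, thin,
                          max_over_subscription_ratio):
    reserved = math.floor(total_gb * reserved_percentage / 100.0)
    if thin:
        # Thin provisioning: weigh against oversubscribed capacity minus
        # what is already provisioned and the reserved headroom.
        return (total_gb * max_over_subscription_ratio
                - provisioned_gb - reserved)
    # Thick provisioning: only real free space minus the reserve counts.
    return free_gb - reserved


print(virtual_free_capacity(2048, 1748, 300, 5, True, 1.5))   # 2670.0
print(virtual_free_capacity(2048, 1748, 300, 5, False, 1.5))  # 1646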

View File

@ -50,7 +50,7 @@ class ChanceWeigherTestCase(test.TestCase):
# ensure HostManager can load the ChanceWeigher # ensure HostManager can load the ChanceWeigher
# via the entry points mechanism # via the entry points mechanism
hm = host_manager.HostManager() hm = host_manager.HostManager()
weighers = hm._choose_host_weighers('ChanceWeigher') weighers = hm._choose_backend_weighers('ChanceWeigher')
self.assertEqual(1, len(weighers)) self.assertEqual(1, len(weighers))
self.assertEqual(weighers[0], chance.ChanceWeigher) self.assertEqual(weighers[0], chance.ChanceWeigher)
@ -58,7 +58,8 @@ class ChanceWeigherTestCase(test.TestCase):
# ensure we don't lose any hosts when weighing with # ensure we don't lose any hosts when weighing with
# the ChanceWeigher # the ChanceWeigher
hm = host_manager.HostManager() hm = host_manager.HostManager()
fake_hosts = [host_manager.HostState('fake_host%s' % x, None) fake_backends = [host_manager.BackendState('fake_be%s' % x, None)
for x in range(1, 5)] for x in range(1, 5)]
weighed_hosts = hm.get_weighed_hosts(fake_hosts, {}, 'ChanceWeigher') weighed_backends = hm.get_weighed_backends(fake_backends, {},
self.assertEqual(4, len(weighed_hosts)) 'ChanceWeigher')
self.assertEqual(4, len(weighed_backends))
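For reference, a minimal sketch of a chance-style weigher like the one exercised above: every backend gets a random weight, so nothing is dropped and ordering is effectively shuffled (this is not the Cinder ChanceWeigher itself):

import random


class ChanceWeigherSketch(object):
    def _weigh_object(self, backend_state, weight_properties):
        return random.random()

    def weigh_objects(self, backends, weight_properties=None):
        return [self._weigh_object(b, weight_properties) for b in backends]


backends = ['fake_be1', 'fake_be2', 'fake_be3', 'fake_be4']
weights = ChanceWeigherSketch().weigh_objects(backends)
assert len(weights) == len(backends)  # nothing is lost by weighing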

View File

@ -35,7 +35,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
driver_cls = filter_scheduler.FilterScheduler driver_cls = filter_scheduler.FilterScheduler
def test_create_group_no_hosts(self): def test_create_group_no_hosts(self):
# Ensure empty hosts result in NoValidHosts exception. # Ensure empty hosts result in NoValidBackend exception.
sched = fakes.FakeFilterScheduler() sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project') fake_context = context.RequestContext('user', 'project')
@ -51,7 +51,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
group_spec = {'group_type': {'name': 'GrpType'}, group_spec = {'group_type': {'name': 'GrpType'},
'volume_properties': {'project_id': 1, 'volume_properties': {'project_id': 1,
'size': 0}} 'size': 0}}
self.assertRaises(exception.NoValidHost, self.assertRaises(exception.NoValidBackend,
sched.schedule_create_group, sched.schedule_create_group,
fake_context, 'faki-id1', group_spec, fake_context, 'faki-id1', group_spec,
request_spec_list, {}, []) request_spec_list, {}, [])
@ -87,7 +87,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertTrue(_mock_service_get_all.called) self.assertTrue(_mock_service_get_all.called)
def test_create_consistencygroup_no_hosts(self): def test_create_consistencygroup_no_hosts(self):
# Ensure empty hosts result in NoValidHosts exception. # Ensure empty hosts result in NoValidBackend exception.
sched = fakes.FakeFilterScheduler() sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project') fake_context = context.RequestContext('user', 'project')
@ -100,7 +100,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'volume_type': {'name': 'Type2', 'volume_type': {'name': 'Type2',
'extra_specs': {}}} 'extra_specs': {}}}
request_spec_list = [request_spec, request_spec2] request_spec_list = [request_spec, request_spec2]
self.assertRaises(exception.NoValidHost, self.assertRaises(exception.NoValidBackend,
sched.schedule_create_consistencygroup, sched.schedule_create_consistencygroup,
fake_context, 'faki-id1', request_spec_list, {}) fake_context, 'faki-id1', request_spec_list, {})
@ -161,7 +161,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertTrue(_mock_service_get_all.called) self.assertTrue(_mock_service_get_all.called)
def test_create_volume_no_hosts(self): def test_create_volume_no_hosts(self):
# Ensure empty hosts/child_zones result in NoValidHosts exception. # Ensure empty hosts/child_zones result in NoValidBackend exception.
sched = fakes.FakeFilterScheduler() sched = fakes.FakeFilterScheduler()
fake_context = context.RequestContext('user', 'project') fake_context = context.RequestContext('user', 'project')
@ -170,8 +170,9 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'volume_type': {'name': 'LVM_iSCSI'}, 'volume_type': {'name': 'LVM_iSCSI'},
'volume_id': fake.VOLUME_ID} 'volume_id': fake.VOLUME_ID}
request_spec = objects.RequestSpec.from_primitives(request_spec) request_spec = objects.RequestSpec.from_primitives(request_spec)
self.assertRaises(exception.NoValidHost, sched.schedule_create_volume, self.assertRaises(exception.NoValidBackend,
fake_context, request_spec, {}) sched.schedule_create_volume, fake_context,
request_spec, {})
def test_create_volume_no_hosts_invalid_req(self): def test_create_volume_no_hosts_invalid_req(self):
sched = fakes.FakeFilterScheduler() sched = fakes.FakeFilterScheduler()
@ -183,7 +184,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'size': 1}, 'size': 1},
'volume_type': {'name': 'LVM_iSCSI'}} 'volume_type': {'name': 'LVM_iSCSI'}}
request_spec = objects.RequestSpec.from_primitives(request_spec) request_spec = objects.RequestSpec.from_primitives(request_spec)
self.assertRaises(exception.NoValidHost, self.assertRaises(exception.NoValidBackend,
sched.schedule_create_volume, sched.schedule_create_volume,
fake_context, fake_context,
request_spec, request_spec,
@ -199,15 +200,15 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'size': 1}, 'size': 1},
'volume_id': fake.VOLUME_ID} 'volume_id': fake.VOLUME_ID}
request_spec = objects.RequestSpec.from_primitives(request_spec) request_spec = objects.RequestSpec.from_primitives(request_spec)
self.assertRaises(exception.NoValidHost, self.assertRaises(exception.NoValidBackend,
sched.schedule_create_volume, sched.schedule_create_volume,
fake_context, fake_context,
request_spec, request_spec,
{}) {})
@mock.patch('cinder.scheduler.host_manager.HostManager.' @mock.patch('cinder.scheduler.host_manager.HostManager.'
'get_all_host_states') 'get_all_backend_states')
def test_create_volume_non_admin(self, _mock_get_all_host_states): def test_create_volume_non_admin(self, _mock_get_all_backend_states):
# Test creating a volume locally using create_volume, passing # Test creating a volume locally using create_volume, passing
# a non-admin context. DB actions should work. # a non-admin context. DB actions should work.
self.was_admin = False self.was_admin = False
@ -219,7 +220,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
return {} return {}
sched = fakes.FakeFilterScheduler() sched = fakes.FakeFilterScheduler()
_mock_get_all_host_states.side_effect = fake_get _mock_get_all_backend_states.side_effect = fake_get
fake_context = context.RequestContext('user', 'project') fake_context = context.RequestContext('user', 'project')
@ -228,8 +229,9 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'volume_type': {'name': 'LVM_iSCSI'}, 'volume_type': {'name': 'LVM_iSCSI'},
'volume_id': fake.VOLUME_ID} 'volume_id': fake.VOLUME_ID}
request_spec = objects.RequestSpec.from_primitives(request_spec) request_spec = objects.RequestSpec.from_primitives(request_spec)
self.assertRaises(exception.NoValidHost, sched.schedule_create_volume, self.assertRaises(exception.NoValidBackend,
fake_context, request_spec, {}) sched.schedule_create_volume, fake_context,
request_spec, {})
self.assertTrue(self.was_admin) self.assertTrue(self.was_admin)
@mock.patch('cinder.db.service_get_all') @mock.patch('cinder.db.service_get_all')
@ -393,38 +395,38 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
retry = dict(num_attempts=2) retry = dict(num_attempts=2)
filter_properties = dict(retry=retry) filter_properties = dict(retry=retry)
self.assertRaises(exception.NoValidHost, sched._schedule, self.context, self.assertRaises(exception.NoValidBackend, sched._schedule,
request_spec, filter_properties=filter_properties) self.context, request_spec,
filter_properties=filter_properties)
def test_add_retry_host(self): def test_add_retry_backend(self):
retry = dict(num_attempts=1, hosts=[]) retry = dict(num_attempts=1, backends=[])
filter_properties = dict(retry=retry) filter_properties = dict(retry=retry)
host = "fakehost" backend = "fakehost"
sched = fakes.FakeFilterScheduler() sched = fakes.FakeFilterScheduler()
sched._add_retry_host(filter_properties, host) sched._add_retry_backend(filter_properties, backend)
hosts = filter_properties['retry']['hosts'] backends = filter_properties['retry']['backends']
self.assertEqual(1, len(hosts)) self.assertListEqual([backend], backends)
self.assertEqual(host, hosts[0])
def test_post_select_populate(self): def test_post_select_populate(self):
# Test addition of certain filter props after a node is selected. # Test addition of certain filter props after a node is selected.
retry = {'hosts': [], 'num_attempts': 1} retry = {'backends': [], 'num_attempts': 1}
filter_properties = {'retry': retry} filter_properties = {'retry': retry}
sched = fakes.FakeFilterScheduler() sched = fakes.FakeFilterScheduler()
host_state = host_manager.HostState('host', None) backend_state = host_manager.BackendState('host', None)
host_state.total_capacity_gb = 1024 backend_state.total_capacity_gb = 1024
sched._post_select_populate_filter_properties(filter_properties, sched._post_select_populate_filter_properties(filter_properties,
host_state) backend_state)
self.assertEqual('host', self.assertEqual('host',
filter_properties['retry']['hosts'][0]) filter_properties['retry']['backends'][0])
self.assertEqual(1024, host_state.total_capacity_gb) self.assertEqual(1024, backend_state.total_capacity_gb)
def _host_passes_filters_setup(self, mock_obj): def _backend_passes_filters_setup(self, mock_obj):
sched = fakes.FakeFilterScheduler() sched = fakes.FakeFilterScheduler()
sched.host_manager = fakes.FakeHostManager() sched.host_manager = fakes.FakeHostManager()
fake_context = context.RequestContext('user', 'project', fake_context = context.RequestContext('user', 'project',
@ -435,48 +437,48 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
return (sched, fake_context) return (sched, fake_context)
@mock.patch('cinder.db.service_get_all') @mock.patch('cinder.db.service_get_all')
def test_host_passes_filters_happy_day(self, _mock_service_get_topic): def test_backend_passes_filters_happy_day(self, _mock_service_get_topic):
"""Do a successful pass through of with host_passes_filters().""" """Do a successful pass through of with backend_passes_filters()."""
sched, ctx = self._host_passes_filters_setup( sched, ctx = self._backend_passes_filters_setup(
_mock_service_get_topic) _mock_service_get_topic)
request_spec = {'volume_id': fake.VOLUME_ID, request_spec = {'volume_id': fake.VOLUME_ID,
'volume_type': {'name': 'LVM_iSCSI'}, 'volume_type': {'name': 'LVM_iSCSI'},
'volume_properties': {'project_id': 1, 'volume_properties': {'project_id': 1,
'size': 1}} 'size': 1}}
request_spec = objects.RequestSpec.from_primitives(request_spec) request_spec = objects.RequestSpec.from_primitives(request_spec)
ret_host = sched.host_passes_filters(ctx, 'host1#lvm1', ret_host = sched.backend_passes_filters(ctx, 'host1#lvm1',
request_spec, {}) request_spec, {})
self.assertEqual('host1', utils.extract_host(ret_host.host)) self.assertEqual('host1', utils.extract_host(ret_host.host))
self.assertTrue(_mock_service_get_topic.called) self.assertTrue(_mock_service_get_topic.called)
@mock.patch('cinder.db.service_get_all') @mock.patch('cinder.db.service_get_all')
def test_host_passes_filters_default_pool_happy_day( def test_backend_passes_filters_default_pool_happy_day(
self, _mock_service_get_topic): self, _mock_service_get_topic):
"""Do a successful pass through of with host_passes_filters().""" """Do a successful pass through of with backend_passes_filters()."""
sched, ctx = self._host_passes_filters_setup( sched, ctx = self._backend_passes_filters_setup(
_mock_service_get_topic) _mock_service_get_topic)
request_spec = {'volume_id': fake.VOLUME_ID, request_spec = {'volume_id': fake.VOLUME_ID,
'volume_type': {'name': 'LVM_iSCSI'}, 'volume_type': {'name': 'LVM_iSCSI'},
'volume_properties': {'project_id': 1, 'volume_properties': {'project_id': 1,
'size': 1}} 'size': 1}}
request_spec = objects.RequestSpec.from_primitives(request_spec) request_spec = objects.RequestSpec.from_primitives(request_spec)
ret_host = sched.host_passes_filters(ctx, 'host5#_pool0', ret_host = sched.backend_passes_filters(ctx, 'host5#_pool0',
request_spec, {}) request_spec, {})
self.assertEqual('host5', utils.extract_host(ret_host.host)) self.assertEqual('host5', utils.extract_host(ret_host.host))
self.assertTrue(_mock_service_get_topic.called) self.assertTrue(_mock_service_get_topic.called)
@mock.patch('cinder.db.service_get_all') @mock.patch('cinder.db.service_get_all')
def test_host_passes_filters_no_capacity(self, _mock_service_get_topic): def test_backend_passes_filters_no_capacity(self, _mock_service_get_topic):
"""Fail the host due to insufficient capacity.""" """Fail the host due to insufficient capacity."""
sched, ctx = self._host_passes_filters_setup( sched, ctx = self._backend_passes_filters_setup(
_mock_service_get_topic) _mock_service_get_topic)
request_spec = {'volume_id': fake.VOLUME_ID, request_spec = {'volume_id': fake.VOLUME_ID,
'volume_type': {'name': 'LVM_iSCSI'}, 'volume_type': {'name': 'LVM_iSCSI'},
'volume_properties': {'project_id': 1, 'volume_properties': {'project_id': 1,
'size': 1024}} 'size': 1024}}
request_spec = objects.RequestSpec.from_primitives(request_spec) request_spec = objects.RequestSpec.from_primitives(request_spec)
self.assertRaises(exception.NoValidHost, self.assertRaises(exception.NoValidBackend,
sched.host_passes_filters, sched.backend_passes_filters,
ctx, 'host1#lvm1', request_spec, {}) ctx, 'host1#lvm1', request_spec, {})
self.assertTrue(_mock_service_get_topic.called) self.assertTrue(_mock_service_get_topic.called)
@ -486,7 +488,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
# policy=never. host4 doesn't have enough space to hold an additional # policy=never. host4 doesn't have enough space to hold an additional
# 200GB, but it is already the host of this volume and should not be # 200GB, but it is already the host of this volume and should not be
# counted twice. # counted twice.
sched, ctx = self._host_passes_filters_setup( sched, ctx = self._backend_passes_filters_setup(
_mock_service_get_topic) _mock_service_get_topic)
extra_specs = {'volume_backend_name': 'lvm4'} extra_specs = {'volume_backend_name': 'lvm4'}
request_spec = {'volume_id': fake.VOLUME_ID, request_spec = {'volume_id': fake.VOLUME_ID,
@ -496,9 +498,9 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'size': 200, 'size': 200,
'host': 'host4#lvm4'}} 'host': 'host4#lvm4'}}
request_spec = objects.RequestSpec.from_primitives(request_spec) request_spec = objects.RequestSpec.from_primitives(request_spec)
host_state = sched.find_retype_host(ctx, request_spec, host_state = sched.find_retype_backend(ctx, request_spec,
filter_properties={}, filter_properties={},
migration_policy='never') migration_policy='never')
self.assertEqual('host4', utils.extract_host(host_state.host)) self.assertEqual('host4', utils.extract_host(host_state.host))
@mock.patch('cinder.db.service_get_all') @mock.patch('cinder.db.service_get_all')
@ -508,7 +510,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
# policy=never. host4 doesn't have enough space to hold an additional # policy=never. host4 doesn't have enough space to hold an additional
# 200GB, but it is already the host of this volume and should not be # 200GB, but it is already the host of this volume and should not be
# counted twice. # counted twice.
sched, ctx = self._host_passes_filters_setup( sched, ctx = self._backend_passes_filters_setup(
_mock_service_get_topic) _mock_service_get_topic)
extra_specs = {'volume_backend_name': 'lvm3'} extra_specs = {'volume_backend_name': 'lvm3'}
request_spec = {'volume_id': fake.VOLUME_ID, request_spec = {'volume_id': fake.VOLUME_ID,
@ -518,16 +520,16 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'size': 200, 'size': 200,
'host': 'host3#lvm3'}} 'host': 'host3#lvm3'}}
request_spec = objects.RequestSpec.from_primitives(request_spec) request_spec = objects.RequestSpec.from_primitives(request_spec)
host_state = sched.find_retype_host(ctx, request_spec, host_state = sched.find_retype_backend(ctx, request_spec,
filter_properties={}, filter_properties={},
migration_policy='never') migration_policy='never')
self.assertEqual('host3#lvm3', host_state.host) self.assertEqual('host3#lvm3', host_state.host)
@mock.patch('cinder.db.service_get_all') @mock.patch('cinder.db.service_get_all')
def test_retype_policy_never_migrate_fail(self, _mock_service_get_topic): def test_retype_policy_never_migrate_fail(self, _mock_service_get_topic):
# Retype should fail if current host doesn't pass filters and # Retype should fail if current host doesn't pass filters and
# policy=never. # policy=never.
sched, ctx = self._host_passes_filters_setup( sched, ctx = self._backend_passes_filters_setup(
_mock_service_get_topic) _mock_service_get_topic)
extra_specs = {'volume_backend_name': 'lvm1'} extra_specs = {'volume_backend_name': 'lvm1'}
request_spec = {'volume_id': fake.VOLUME_ID, request_spec = {'volume_id': fake.VOLUME_ID,
@ -537,15 +539,15 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'size': 200, 'size': 200,
'host': 'host4'}} 'host': 'host4'}}
request_spec = objects.RequestSpec.from_primitives(request_spec) request_spec = objects.RequestSpec.from_primitives(request_spec)
self.assertRaises(exception.NoValidHost, sched.find_retype_host, ctx, self.assertRaises(exception.NoValidBackend, sched.find_retype_backend,
request_spec, filter_properties={}, ctx, request_spec, filter_properties={},
migration_policy='never') migration_policy='never')
@mock.patch('cinder.db.service_get_all') @mock.patch('cinder.db.service_get_all')
def test_retype_policy_demand_migrate_pass(self, _mock_service_get_topic): def test_retype_policy_demand_migrate_pass(self, _mock_service_get_topic):
# Retype should pass if current host fails filters but another host # Retype should pass if current host fails filters but another host
# is suitable when policy=on-demand. # is suitable when policy=on-demand.
sched, ctx = self._host_passes_filters_setup( sched, ctx = self._backend_passes_filters_setup(
_mock_service_get_topic) _mock_service_get_topic)
extra_specs = {'volume_backend_name': 'lvm1'} extra_specs = {'volume_backend_name': 'lvm1'}
request_spec = {'volume_id': fake.VOLUME_ID, request_spec = {'volume_id': fake.VOLUME_ID,
@ -555,16 +557,16 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'size': 200, 'size': 200,
'host': 'host4'}} 'host': 'host4'}}
request_spec = objects.RequestSpec.from_primitives(request_spec) request_spec = objects.RequestSpec.from_primitives(request_spec)
host_state = sched.find_retype_host(ctx, request_spec, host_state = sched.find_retype_backend(ctx, request_spec,
filter_properties={}, filter_properties={},
migration_policy='on-demand') migration_policy='on-demand')
self.assertEqual('host1', utils.extract_host(host_state.host)) self.assertEqual('host1', utils.extract_host(host_state.host))
@mock.patch('cinder.db.service_get_all') @mock.patch('cinder.db.service_get_all')
def test_retype_policy_demand_migrate_fail(self, _mock_service_get_topic): def test_retype_policy_demand_migrate_fail(self, _mock_service_get_topic):
# Retype should fail if current host doesn't pass filters and # Retype should fail if current host doesn't pass filters and
# no other suitable candidates exist even if policy=on-demand. # no other suitable candidates exist even if policy=on-demand.
sched, ctx = self._host_passes_filters_setup( sched, ctx = self._backend_passes_filters_setup(
_mock_service_get_topic) _mock_service_get_topic)
extra_specs = {'volume_backend_name': 'lvm1'} extra_specs = {'volume_backend_name': 'lvm1'}
request_spec = {'volume_id': fake.VOLUME_ID, request_spec = {'volume_id': fake.VOLUME_ID,
@ -574,6 +576,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'size': 2048, 'size': 2048,
'host': 'host4'}} 'host': 'host4'}}
request_spec = objects.RequestSpec.from_primitives(request_spec) request_spec = objects.RequestSpec.from_primitives(request_spec)
self.assertRaises(exception.NoValidHost, sched.find_retype_host, ctx, self.assertRaises(exception.NoValidBackend, sched.find_retype_backend,
request_spec, filter_properties={}, ctx, request_spec, filter_properties={},
migration_policy='on-demand') migration_policy='on-demand')
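The retry tests above (test_add_retry_backend and test_post_select_populate) exercise bookkeeping that records each chosen backend in filter_properties; a minimal sketch of that bookkeeping, with invented helper names mirroring the test names:

def add_retry_backend(filter_properties, backend):
    # Append the chosen backend to the retry list when retries are enabled,
    # so a rescheduled request knows which backends were already tried.
    retry = filter_properties.get('retry')
    if retry:
        retry.setdefault('backends', []).append(backend)


def post_select_populate(filter_properties, backend_state):
    # Record the backend the volume is about to be handed to.
    add_retry_backend(filter_properties, backend_state['host'])


filter_properties = {'retry': {'num_attempts': 1, 'backends': []}}
post_select_populate(filter_properties, {'host': 'host1@lvm#pool'})
print(filter_properties['retry']['backends'])  # ['host1@lvm#pool']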

View File

@ -23,7 +23,7 @@ from cinder.tests.unit.scheduler import fakes
class GoodnessWeigherTestCase(test.TestCase): class GoodnessWeigherTestCase(test.TestCase):
def test_goodness_weigher_with_no_goodness_function(self): def test_goodness_weigher_with_no_goodness_function(self):
weigher = goodness.GoodnessWeigher() weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', { host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com', 'host': 'host.example.com',
'capabilities': { 'capabilities': {
'foo': '50' 'foo': '50'
@ -36,19 +36,19 @@ class GoodnessWeigherTestCase(test.TestCase):
def test_goodness_weigher_passing_host(self): def test_goodness_weigher_passing_host(self):
weigher = goodness.GoodnessWeigher() weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', { host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com', 'host': 'host.example.com',
'capabilities': { 'capabilities': {
'goodness_function': '100' 'goodness_function': '100'
} }
}) })
host_state_2 = fakes.FakeHostState('host2', { host_state_2 = fakes.FakeBackendState('host2', {
'host': 'host2.example.com', 'host': 'host2.example.com',
'capabilities': { 'capabilities': {
'goodness_function': '0' 'goodness_function': '0'
} }
}) })
host_state_3 = fakes.FakeHostState('host3', { host_state_3 = fakes.FakeBackendState('host3', {
'host': 'host3.example.com', 'host': 'host3.example.com',
'capabilities': { 'capabilities': {
'goodness_function': '100 / 2' 'goodness_function': '100 / 2'
@ -65,7 +65,7 @@ class GoodnessWeigherTestCase(test.TestCase):
def test_goodness_weigher_capabilities_substitution(self): def test_goodness_weigher_capabilities_substitution(self):
weigher = goodness.GoodnessWeigher() weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', { host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com', 'host': 'host.example.com',
'capabilities': { 'capabilities': {
'foo': 50, 'foo': 50,
@ -79,7 +79,7 @@ class GoodnessWeigherTestCase(test.TestCase):
def test_goodness_weigher_extra_specs_substitution(self): def test_goodness_weigher_extra_specs_substitution(self):
weigher = goodness.GoodnessWeigher() weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', { host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com', 'host': 'host.example.com',
'capabilities': { 'capabilities': {
'goodness_function': '10 + extra.foo' 'goodness_function': '10 + extra.foo'
@ -98,7 +98,7 @@ class GoodnessWeigherTestCase(test.TestCase):
def test_goodness_weigher_volume_substitution(self): def test_goodness_weigher_volume_substitution(self):
weigher = goodness.GoodnessWeigher() weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', { host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com', 'host': 'host.example.com',
'capabilities': { 'capabilities': {
'goodness_function': '10 + volume.foo' 'goodness_function': '10 + volume.foo'
@ -117,7 +117,7 @@ class GoodnessWeigherTestCase(test.TestCase):
def test_goodness_weigher_qos_substitution(self): def test_goodness_weigher_qos_substitution(self):
weigher = goodness.GoodnessWeigher() weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', { host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com', 'host': 'host.example.com',
'capabilities': { 'capabilities': {
'goodness_function': '10 + qos.foo' 'goodness_function': '10 + qos.foo'
@ -134,7 +134,7 @@ class GoodnessWeigherTestCase(test.TestCase):
def test_goodness_weigher_stats_substitution(self): def test_goodness_weigher_stats_substitution(self):
weigher = goodness.GoodnessWeigher() weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', { host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com', 'host': 'host.example.com',
'capabilities': { 'capabilities': {
'goodness_function': 'stats.free_capacity_gb > 20' 'goodness_function': 'stats.free_capacity_gb > 20'
@ -148,7 +148,7 @@ class GoodnessWeigherTestCase(test.TestCase):
def test_goodness_weigher_invalid_substitution(self): def test_goodness_weigher_invalid_substitution(self):
weigher = goodness.GoodnessWeigher() weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', { host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com', 'host': 'host.example.com',
'capabilities': { 'capabilities': {
'goodness_function': '10 + stats.my_val' 'goodness_function': '10 + stats.my_val'
@ -162,13 +162,13 @@ class GoodnessWeigherTestCase(test.TestCase):
def test_goodness_weigher_host_rating_out_of_bounds(self): def test_goodness_weigher_host_rating_out_of_bounds(self):
weigher = goodness.GoodnessWeigher() weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', { host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com', 'host': 'host.example.com',
'capabilities': { 'capabilities': {
'goodness_function': '-10' 'goodness_function': '-10'
} }
}) })
host_state_2 = fakes.FakeHostState('host2', { host_state_2 = fakes.FakeBackendState('host2', {
'host': 'host2.example.com', 'host': 'host2.example.com',
'capabilities': { 'capabilities': {
'goodness_function': '200' 'goodness_function': '200'
@ -183,7 +183,7 @@ class GoodnessWeigherTestCase(test.TestCase):
def test_goodness_weigher_invalid_goodness_function(self): def test_goodness_weigher_invalid_goodness_function(self):
weigher = goodness.GoodnessWeigher() weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', { host_state = fakes.FakeBackendState('host1', {
'host': 'host.example.com', 'host': 'host.example.com',
'capabilities': { 'capabilities': {
'goodness_function': '50 / 0' 'goodness_function': '50 / 0'
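
Tying these cases together, a single weighing call in this test class looks roughly like the sketch below; the _weigh_object entry point and the empty weight-properties dict are assumptions read off the assertions above, not a documented API.

    from cinder.scheduler.weights import goodness
    from cinder.tests.unit.scheduler import fakes

    weigher = goodness.GoodnessWeigher()
    backend_state = fakes.FakeBackendState('host1', {
        'host': 'host.example.com',
        'capabilities': {
            'goodness_function': '100',
        },
    })
    # Assumed scoring entry point; a constant '100' should land at the top
    # of the 0-100 goodness range, as the passing-backend case above expects.
    weight = weigher._weigh_object(backend_state, {})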

File diff suppressed because it is too large

View File

@ -34,13 +34,13 @@ from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit.objects import test_service from cinder.tests.unit.objects import test_service
class FakeFilterClass1(filters.BaseHostFilter): class FakeFilterClass1(filters.BaseBackendFilter):
def host_passes(self, host_state, filter_properties): def backend_passes(self, host_state, filter_properties):
pass pass
class FakeFilterClass2(filters.BaseHostFilter): class FakeFilterClass2(filters.BaseBackendFilter):
def host_passes(self, host_state, filter_properties): def backend_passes(self, host_state, filter_properties):
pass pass
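
These two fakes double as a template for out-of-tree filters after the rename: subclass BaseBackendFilter and implement backend_passes. A short illustrative sketch, where the class name and the 10 GB threshold are made up for the example:

    from cinder.scheduler import filters

    class EnoughFreeSpaceFilter(filters.BaseBackendFilter):
        """Illustrative filter: pass backends reporting >= 10 GB free."""

        def backend_passes(self, backend_state, filter_properties):
            free = getattr(backend_state, 'free_capacity_gb', None)
            # 'unknown'/'infinite' capacities are ignored here for brevity;
            # a production filter would handle those strings explicitly.
            return isinstance(free, (int, float)) and free >= 10
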
@ -50,46 +50,46 @@ class HostManagerTestCase(test.TestCase):
def setUp(self): def setUp(self):
super(HostManagerTestCase, self).setUp() super(HostManagerTestCase, self).setUp()
self.host_manager = host_manager.HostManager() self.host_manager = host_manager.HostManager()
self.fake_hosts = [host_manager.HostState('fake_host%s' % x, None) self.fake_backends = [host_manager.BackendState('fake_be%s' % x, None)
for x in range(1, 5)] for x in range(1, 5)]
# For a second scheduler service. # For a second scheduler service.
self.host_manager_1 = host_manager.HostManager() self.host_manager_1 = host_manager.HostManager()
def test_choose_host_filters_not_found(self): def test_choose_backend_filters_not_found(self):
self.flags(scheduler_default_filters='FakeFilterClass3') self.flags(scheduler_default_filters='FakeFilterClass3')
self.host_manager.filter_classes = [FakeFilterClass1, self.host_manager.filter_classes = [FakeFilterClass1,
FakeFilterClass2] FakeFilterClass2]
self.assertRaises(exception.SchedulerHostFilterNotFound, self.assertRaises(exception.SchedulerHostFilterNotFound,
self.host_manager._choose_host_filters, None) self.host_manager._choose_backend_filters, None)
def test_choose_host_filters(self): def test_choose_backend_filters(self):
self.flags(scheduler_default_filters=['FakeFilterClass2']) self.flags(scheduler_default_filters=['FakeFilterClass2'])
self.host_manager.filter_classes = [FakeFilterClass1, self.host_manager.filter_classes = [FakeFilterClass1,
FakeFilterClass2] FakeFilterClass2]
# Test 'volume' returns 1 correct function # Test 'volume' returns 1 correct function
filter_classes = self.host_manager._choose_host_filters(None) filter_classes = self.host_manager._choose_backend_filters(None)
self.assertEqual(1, len(filter_classes)) self.assertEqual(1, len(filter_classes))
self.assertEqual('FakeFilterClass2', filter_classes[0].__name__) self.assertEqual('FakeFilterClass2', filter_classes[0].__name__)
@mock.patch('cinder.scheduler.host_manager.HostManager.' @mock.patch('cinder.scheduler.host_manager.HostManager.'
'_choose_host_filters') '_choose_backend_filters')
def test_get_filtered_hosts(self, _mock_choose_host_filters): def test_get_filtered_backends(self, _mock_choose_backend_filters):
filter_class = FakeFilterClass1 filter_class = FakeFilterClass1
mock_func = mock.Mock() mock_func = mock.Mock()
mock_func.return_value = True mock_func.return_value = True
filter_class._filter_one = mock_func filter_class._filter_one = mock_func
_mock_choose_host_filters.return_value = [filter_class] _mock_choose_backend_filters.return_value = [filter_class]
fake_properties = {'moo': 1, 'cow': 2} fake_properties = {'moo': 1, 'cow': 2}
expected = [] expected = []
for fake_host in self.fake_hosts: for fake_backend in self.fake_backends:
expected.append(mock.call(fake_host, fake_properties)) expected.append(mock.call(fake_backend, fake_properties))
result = self.host_manager.get_filtered_hosts(self.fake_hosts, result = self.host_manager.get_filtered_backends(self.fake_backends,
fake_properties) fake_properties)
self.assertEqual(expected, mock_func.call_args_list) self.assertEqual(expected, mock_func.call_args_list)
self.assertEqual(set(self.fake_hosts), set(result)) self.assertEqual(set(self.fake_backends), set(result))
@mock.patch('cinder.scheduler.host_manager.HostManager._get_updated_pools') @mock.patch('cinder.scheduler.host_manager.HostManager._get_updated_pools')
@mock.patch('oslo_utils.timeutils.utcnow') @mock.patch('oslo_utils.timeutils.utcnow')
@ -563,6 +563,10 @@ class HostManagerTestCase(test.TestCase):
self.assertEqual(1, len(res)) self.assertEqual(1, len(res))
self.assertEqual(dates[1], res[0]['capabilities']['timestamp']) self.assertEqual(dates[1], res[0]['capabilities']['timestamp'])
# Now we simulate old service that doesn't send timestamp
del mocked_service_states['host1']['timestamp']
with mock.patch.dict(self.host_manager.service_states,
mocked_service_states):
self.host_manager.update_service_capabilities(service_name, self.host_manager.update_service_capabilities(service_name,
'host1', 'host1',
host_volume_capabs, host_volume_capabs,
@ -572,8 +576,8 @@ class HostManagerTestCase(test.TestCase):
self.assertEqual(dates[2], res[0]['capabilities']['timestamp']) self.assertEqual(dates[2], res[0]['capabilities']['timestamp'])
@mock.patch('cinder.objects.Service.is_up', True) @mock.patch('cinder.objects.Service.is_up', True)
def test_get_all_host_states_cluster(self): def test_get_all_backend_states_cluster(self):
"""Test get_all_host_states when we have clustered services. """Test get_all_backend_states when we have clustered services.
Confirm that clustered services are grouped and that only the latest Confirm that clustered services are grouped and that only the latest
of the capability reports is relevant. of the capability reports is relevant.
@ -631,7 +635,7 @@ class HostManagerTestCase(test.TestCase):
'volume', services[i].host, capabilities[i][1], 'volume', services[i].host, capabilities[i][1],
services[i].cluster_name, capabilities[i][0]) services[i].cluster_name, capabilities[i][0])
res = self.host_manager.get_all_host_states(ctxt) res = self.host_manager.get_all_backend_states(ctxt)
result = {(s.cluster_name or s.host, s.free_capacity_gb) for s in res} result = {(s.cluster_name or s.host, s.free_capacity_gb) for s in res}
expected = {(cluster_name + '#_pool0', 2000), expected = {(cluster_name + '#_pool0', 2000),
('non_clustered_host#_pool0', 4000)} ('non_clustered_host#_pool0', 4000)}
@ -640,8 +644,8 @@ class HostManagerTestCase(test.TestCase):
@mock.patch('cinder.db.service_get_all') @mock.patch('cinder.db.service_get_all')
@mock.patch('cinder.objects.service.Service.is_up', @mock.patch('cinder.objects.service.Service.is_up',
new_callable=mock.PropertyMock) new_callable=mock.PropertyMock)
def test_get_all_host_states(self, _mock_service_is_up, def test_get_all_backend_states(self, _mock_service_is_up,
_mock_service_get_all): _mock_service_get_all):
context = 'fake_context' context = 'fake_context'
timestamp = datetime.utcnow() timestamp = datetime.utcnow()
topic = constants.VOLUME_TOPIC topic = constants.VOLUME_TOPIC
@ -695,7 +699,7 @@ class HostManagerTestCase(test.TestCase):
host_manager.LOG.warning = _mock_warning host_manager.LOG.warning = _mock_warning
# Get all states # Get all states
self.host_manager.get_all_host_states(context) self.host_manager.get_all_backend_states(context)
_mock_service_get_all.assert_called_with(context, _mock_service_get_all.assert_called_with(context,
disabled=False, disabled=False,
topic=topic) topic=topic)
@ -704,14 +708,14 @@ class HostManagerTestCase(test.TestCase):
expected = [mock.call() for s in service_objs] expected = [mock.call() for s in service_objs]
self.assertEqual(expected, _mock_service_is_up.call_args_list) self.assertEqual(expected, _mock_service_is_up.call_args_list)
# Get host_state_map and make sure we have the first 3 hosts # Get backend_state_map and make sure we have the first 3 hosts
host_state_map = self.host_manager.host_state_map backend_state_map = self.host_manager.backend_state_map
self.assertEqual(3, len(host_state_map)) self.assertEqual(3, len(backend_state_map))
for i in range(3): for i in range(3):
volume_node = services[i] volume_node = services[i]
host = volume_node['host'] host = volume_node['host']
test_service.TestService._compare(self, volume_node, test_service.TestService._compare(self, volume_node,
host_state_map[host].service) backend_state_map[host].service)
# Second test: Now service.is_up returns False for host3 # Second test: Now service.is_up returns False for host3
_mock_service_is_up.reset_mock() _mock_service_is_up.reset_mock()
@ -720,7 +724,7 @@ class HostManagerTestCase(test.TestCase):
_mock_warning.reset_mock() _mock_warning.reset_mock()
# Get all states, make sure host 3 is reported as down # Get all states, make sure host 3 is reported as down
self.host_manager.get_all_host_states(context) self.host_manager.get_all_backend_states(context)
_mock_service_get_all.assert_called_with(context, _mock_service_get_all.assert_called_with(context,
disabled=False, disabled=False,
topic=topic) topic=topic)
@ -728,15 +732,15 @@ class HostManagerTestCase(test.TestCase):
self.assertEqual(expected, _mock_service_is_up.call_args_list) self.assertEqual(expected, _mock_service_is_up.call_args_list)
self.assertGreater(_mock_warning.call_count, 0) self.assertGreater(_mock_warning.call_count, 0)
# Get host_state_map and make sure we have the first 2 hosts (host3 is # Get backend_state_map and make sure we have the first 2 hosts (host3
# down, host4 is missing capabilities) # is down, host4 is missing capabilities)
host_state_map = self.host_manager.host_state_map backend_state_map = self.host_manager.backend_state_map
self.assertEqual(2, len(host_state_map)) self.assertEqual(2, len(backend_state_map))
for i in range(2): for i in range(2):
volume_node = services[i] volume_node = services[i]
host = volume_node['host'] host = volume_node['host']
test_service.TestService._compare(self, volume_node, test_service.TestService._compare(self, volume_node,
host_state_map[host].service) backend_state_map[host].service)
@mock.patch('cinder.db.service_get_all') @mock.patch('cinder.db.service_get_all')
@mock.patch('cinder.objects.service.Service.is_up', @mock.patch('cinder.objects.service.Service.is_up',
@ -963,12 +967,12 @@ class HostManagerTestCase(test.TestCase):
sorted(res2, key=sort_func)) sorted(res2, key=sort_func))
class HostStateTestCase(test.TestCase): class BackendStateTestCase(test.TestCase):
"""Test case for HostState class.""" """Test case for BackendState class."""
def test_update_from_volume_capability_nopool(self): def test_update_from_volume_capability_nopool(self):
fake_host = host_manager.HostState('host1', None) fake_backend = host_manager.BackendState('be1', None)
self.assertIsNone(fake_host.free_capacity_gb) self.assertIsNone(fake_backend.free_capacity_gb)
volume_capability = {'total_capacity_gb': 1024, volume_capability = {'total_capacity_gb': 1024,
'free_capacity_gb': 512, 'free_capacity_gb': 512,
@ -976,34 +980,34 @@ class HostStateTestCase(test.TestCase):
'reserved_percentage': 0, 'reserved_percentage': 0,
'timestamp': None} 'timestamp': None}
fake_host.update_from_volume_capability(volume_capability) fake_backend.update_from_volume_capability(volume_capability)
# Backend level stats remain uninitialized # Backend level stats remain uninitialized
self.assertEqual(0, fake_host.total_capacity_gb) self.assertEqual(0, fake_backend.total_capacity_gb)
self.assertIsNone(fake_host.free_capacity_gb) self.assertIsNone(fake_backend.free_capacity_gb)
# Pool stats has been updated # Pool stats has been updated
self.assertEqual(1024, fake_host.pools['_pool0'].total_capacity_gb) self.assertEqual(1024, fake_backend.pools['_pool0'].total_capacity_gb)
self.assertEqual(512, fake_host.pools['_pool0'].free_capacity_gb) self.assertEqual(512, fake_backend.pools['_pool0'].free_capacity_gb)
self.assertEqual(512, self.assertEqual(512,
fake_host.pools['_pool0'].provisioned_capacity_gb) fake_backend.pools['_pool0'].provisioned_capacity_gb)
# Test update for existing host state # Test update for existing host state
volume_capability.update(dict(total_capacity_gb=1000)) volume_capability.update(dict(total_capacity_gb=1000))
fake_host.update_from_volume_capability(volume_capability) fake_backend.update_from_volume_capability(volume_capability)
self.assertEqual(1000, fake_host.pools['_pool0'].total_capacity_gb) self.assertEqual(1000, fake_backend.pools['_pool0'].total_capacity_gb)
# Test update for existing host state with different backend name # Test update for existing host state with different backend name
volume_capability.update(dict(volume_backend_name='magic')) volume_capability.update(dict(volume_backend_name='magic'))
fake_host.update_from_volume_capability(volume_capability) fake_backend.update_from_volume_capability(volume_capability)
self.assertEqual(1000, fake_host.pools['magic'].total_capacity_gb) self.assertEqual(1000, fake_backend.pools['magic'].total_capacity_gb)
self.assertEqual(512, fake_host.pools['magic'].free_capacity_gb) self.assertEqual(512, fake_backend.pools['magic'].free_capacity_gb)
self.assertEqual(512, self.assertEqual(512,
fake_host.pools['magic'].provisioned_capacity_gb) fake_backend.pools['magic'].provisioned_capacity_gb)
# 'pool0' becomes nonactive pool, and is deleted # 'pool0' becomes nonactive pool, and is deleted
self.assertRaises(KeyError, lambda: fake_host.pools['pool0']) self.assertRaises(KeyError, lambda: fake_backend.pools['pool0'])
def test_update_from_volume_capability_with_pools(self): def test_update_from_volume_capability_with_pools(self):
fake_host = host_manager.HostState('host1', None) fake_backend = host_manager.BackendState('host1', None)
self.assertIsNone(fake_host.free_capacity_gb) self.assertIsNone(fake_backend.free_capacity_gb)
capability = { capability = {
'volume_backend_name': 'Local iSCSI', 'volume_backend_name': 'Local iSCSI',
'vendor_name': 'OpenStack', 'vendor_name': 'OpenStack',
@ -1037,27 +1041,28 @@ class HostStateTestCase(test.TestCase):
'timestamp': None, 'timestamp': None,
} }
fake_host.update_from_volume_capability(capability) fake_backend.update_from_volume_capability(capability)
self.assertEqual('Local iSCSI', fake_host.volume_backend_name) self.assertEqual('Local iSCSI', fake_backend.volume_backend_name)
self.assertEqual('iSCSI', fake_host.storage_protocol) self.assertEqual('iSCSI', fake_backend.storage_protocol)
self.assertEqual('OpenStack', fake_host.vendor_name) self.assertEqual('OpenStack', fake_backend.vendor_name)
self.assertEqual('1.0.1', fake_host.driver_version) self.assertEqual('1.0.1', fake_backend.driver_version)
# Backend level stats remain uninitialized # Backend level stats remain uninitialized
self.assertEqual(0, fake_host.total_capacity_gb) self.assertEqual(0, fake_backend.total_capacity_gb)
self.assertIsNone(fake_host.free_capacity_gb) self.assertIsNone(fake_backend.free_capacity_gb)
# Pool stats has been updated # Pool stats has been updated
self.assertEqual(2, len(fake_host.pools)) self.assertEqual(2, len(fake_backend.pools))
self.assertEqual(500, fake_host.pools['1st pool'].total_capacity_gb) self.assertEqual(500, fake_backend.pools['1st pool'].total_capacity_gb)
self.assertEqual(230, fake_host.pools['1st pool'].free_capacity_gb) self.assertEqual(230, fake_backend.pools['1st pool'].free_capacity_gb)
self.assertEqual(270, self.assertEqual(
fake_host.pools['1st pool'].provisioned_capacity_gb) 270, fake_backend.pools['1st pool'].provisioned_capacity_gb)
self.assertEqual(1024, fake_host.pools['2nd pool'].total_capacity_gb) self.assertEqual(
self.assertEqual(1024, fake_host.pools['2nd pool'].free_capacity_gb) 1024, fake_backend.pools['2nd pool'].total_capacity_gb)
self.assertEqual(0, self.assertEqual(1024, fake_backend.pools['2nd pool'].free_capacity_gb)
fake_host.pools['2nd pool'].provisioned_capacity_gb) self.assertEqual(
0, fake_backend.pools['2nd pool'].provisioned_capacity_gb)
capability = { capability = {
'volume_backend_name': 'Local iSCSI', 'volume_backend_name': 'Local iSCSI',
@ -1077,83 +1082,85 @@ class HostStateTestCase(test.TestCase):
'timestamp': None, 'timestamp': None,
} }
# test update HostState Record # test update BackendState Record
fake_host.update_from_volume_capability(capability) fake_backend.update_from_volume_capability(capability)
self.assertEqual('1.0.2', fake_host.driver_version) self.assertEqual('1.0.2', fake_backend.driver_version)
# Non-active pool stats has been removed # Non-active pool stats has been removed
self.assertEqual(1, len(fake_host.pools)) self.assertEqual(1, len(fake_backend.pools))
self.assertRaises(KeyError, lambda: fake_host.pools['1st pool']) self.assertRaises(KeyError, lambda: fake_backend.pools['1st pool'])
self.assertRaises(KeyError, lambda: fake_host.pools['2nd pool']) self.assertRaises(KeyError, lambda: fake_backend.pools['2nd pool'])
self.assertEqual(10000, fake_host.pools['3rd pool'].total_capacity_gb) self.assertEqual(10000,
self.assertEqual(10000, fake_host.pools['3rd pool'].free_capacity_gb) fake_backend.pools['3rd pool'].total_capacity_gb)
self.assertEqual(0, self.assertEqual(10000,
fake_host.pools['3rd pool'].provisioned_capacity_gb) fake_backend.pools['3rd pool'].free_capacity_gb)
self.assertEqual(
0, fake_backend.pools['3rd pool'].provisioned_capacity_gb)
def test_update_from_volume_infinite_capability(self): def test_update_from_volume_infinite_capability(self):
fake_host = host_manager.HostState('host1', None) fake_backend = host_manager.BackendState('host1', None)
self.assertIsNone(fake_host.free_capacity_gb) self.assertIsNone(fake_backend.free_capacity_gb)
volume_capability = {'total_capacity_gb': 'infinite', volume_capability = {'total_capacity_gb': 'infinite',
'free_capacity_gb': 'infinite', 'free_capacity_gb': 'infinite',
'reserved_percentage': 0, 'reserved_percentage': 0,
'timestamp': None} 'timestamp': None}
fake_host.update_from_volume_capability(volume_capability) fake_backend.update_from_volume_capability(volume_capability)
# Backend level stats remain uninitialized # Backend level stats remain uninitialized
self.assertEqual(0, fake_host.total_capacity_gb) self.assertEqual(0, fake_backend.total_capacity_gb)
self.assertIsNone(fake_host.free_capacity_gb) self.assertIsNone(fake_backend.free_capacity_gb)
# Pool stats has been updated # Pool stats has been updated
self.assertEqual( self.assertEqual(
'infinite', 'infinite',
fake_host.pools['_pool0'].total_capacity_gb) fake_backend.pools['_pool0'].total_capacity_gb)
self.assertEqual( self.assertEqual(
'infinite', 'infinite',
fake_host.pools['_pool0'].free_capacity_gb) fake_backend.pools['_pool0'].free_capacity_gb)
def test_update_from_volume_unknown_capability(self): def test_update_from_volume_unknown_capability(self):
fake_host = host_manager.HostState('host1', None) fake_backend = host_manager.BackendState('host1', None)
self.assertIsNone(fake_host.free_capacity_gb) self.assertIsNone(fake_backend.free_capacity_gb)
volume_capability = {'total_capacity_gb': 'infinite', volume_capability = {'total_capacity_gb': 'infinite',
'free_capacity_gb': 'unknown', 'free_capacity_gb': 'unknown',
'reserved_percentage': 0, 'reserved_percentage': 0,
'timestamp': None} 'timestamp': None}
fake_host.update_from_volume_capability(volume_capability) fake_backend.update_from_volume_capability(volume_capability)
# Backend level stats remain uninitialized # Backend level stats remain uninitialized
self.assertEqual(0, fake_host.total_capacity_gb) self.assertEqual(0, fake_backend.total_capacity_gb)
self.assertIsNone(fake_host.free_capacity_gb) self.assertIsNone(fake_backend.free_capacity_gb)
# Pool stats has been updated # Pool stats has been updated
self.assertEqual( self.assertEqual(
'infinite', 'infinite',
fake_host.pools['_pool0'].total_capacity_gb) fake_backend.pools['_pool0'].total_capacity_gb)
self.assertEqual( self.assertEqual(
'unknown', 'unknown',
fake_host.pools['_pool0'].free_capacity_gb) fake_backend.pools['_pool0'].free_capacity_gb)
def test_update_from_empty_volume_capability(self): def test_update_from_empty_volume_capability(self):
fake_host = host_manager.HostState('host1', None) fake_backend = host_manager.BackendState('host1', None)
vol_cap = {'timestamp': None} vol_cap = {'timestamp': None}
fake_host.update_from_volume_capability(vol_cap) fake_backend.update_from_volume_capability(vol_cap)
self.assertEqual(0, fake_host.total_capacity_gb) self.assertEqual(0, fake_backend.total_capacity_gb)
self.assertIsNone(fake_host.free_capacity_gb) self.assertIsNone(fake_backend.free_capacity_gb)
# Pool stats has been updated # Pool stats has been updated
self.assertEqual(0, self.assertEqual(0,
fake_host.pools['_pool0'].total_capacity_gb) fake_backend.pools['_pool0'].total_capacity_gb)
self.assertEqual(0, self.assertEqual(0,
fake_host.pools['_pool0'].free_capacity_gb) fake_backend.pools['_pool0'].free_capacity_gb)
self.assertEqual(0, self.assertEqual(0,
fake_host.pools['_pool0'].provisioned_capacity_gb) fake_backend.pools['_pool0'].provisioned_capacity_gb)
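
As a compact recap of what these assertions pin down, a capability report with no pool list lands in the implicit '_pool0' pool while the backend-level totals keep their defaults; a minimal sketch mirroring the no-pool test above:

    from cinder.scheduler import host_manager

    backend = host_manager.BackendState('host1', None)
    backend.update_from_volume_capability({
        'total_capacity_gb': 1024,
        'free_capacity_gb': 512,
        'provisioned_capacity_gb': 512,
        'reserved_percentage': 0,
        'timestamp': None,
    })
    # Stats are tracked per pool; with no pool reported they go to '_pool0',
    # and the backend-level counters stay uninitialized.
    assert backend.pools['_pool0'].total_capacity_gb == 1024
    assert backend.total_capacity_gb == 0
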
class PoolStateTestCase(test.TestCase): class PoolStateTestCase(test.TestCase):
"""Test case for HostState class.""" """Test case for BackendState class."""
def test_update_from_volume_capability(self): def test_update_from_volume_capability(self):
fake_pool = host_manager.PoolState('host1', None, None, 'pool0') fake_pool = host_manager.PoolState('host1', None, None, 'pool0')

View File

@ -126,9 +126,9 @@ class SchedulerManagerTestCase(test.TestCase):
def test_create_volume_exception_puts_volume_in_error_state( def test_create_volume_exception_puts_volume_in_error_state(
self, _mock_volume_update, _mock_message_create, self, _mock_volume_update, _mock_message_create,
_mock_sched_create): _mock_sched_create):
# Test NoValidHost exception behavior for create_volume. # Test NoValidBackend exception behavior for create_volume.
# Puts the volume in 'error' state and eats the exception. # Puts the volume in 'error' state and eats the exception.
_mock_sched_create.side_effect = exception.NoValidHost(reason="") _mock_sched_create.side_effect = exception.NoValidBackend(reason="")
volume = fake_volume.fake_volume_obj(self.context) volume = fake_volume.fake_volume_obj(self.context)
request_spec = {'volume_id': volume.id, request_spec = {'volume_id': volume.id,
'volume': {'id': volume.id, '_name_id': None, 'volume': {'id': volume.id, '_name_id': None,
@ -223,39 +223,39 @@ class SchedulerManagerTestCase(test.TestCase):
self.assertFalse(_mock_sleep.called) self.assertFalse(_mock_sleep.called)
@mock.patch('cinder.db.volume_get') @mock.patch('cinder.db.volume_get')
@mock.patch('cinder.scheduler.driver.Scheduler.host_passes_filters') @mock.patch('cinder.scheduler.driver.Scheduler.backend_passes_filters')
@mock.patch('cinder.db.volume_update') @mock.patch('cinder.db.volume_update')
def test_migrate_volume_exception_returns_volume_state( def test_migrate_volume_exception_returns_volume_state(
self, _mock_volume_update, _mock_host_passes, self, _mock_volume_update, _mock_backend_passes,
_mock_volume_get): _mock_volume_get):
# Test NoValidHost exception behavior for migrate_volume_to_host. # Test NoValidBackend exception behavior for migrate_volume_to_host.
# Puts the volume in 'error_migrating' state and eats the exception. # Puts the volume in 'error_migrating' state and eats the exception.
fake_updates = {'migration_status': 'error'} fake_updates = {'migration_status': 'error'}
self._test_migrate_volume_exception_returns_volume_state( self._test_migrate_volume_exception_returns_volume_state(
_mock_volume_update, _mock_host_passes, _mock_volume_get, _mock_volume_update, _mock_backend_passes, _mock_volume_get,
'available', fake_updates) 'available', fake_updates)
@mock.patch('cinder.db.volume_get') @mock.patch('cinder.db.volume_get')
@mock.patch('cinder.scheduler.driver.Scheduler.host_passes_filters') @mock.patch('cinder.scheduler.driver.Scheduler.backend_passes_filters')
@mock.patch('cinder.db.volume_update') @mock.patch('cinder.db.volume_update')
def test_migrate_volume_exception_returns_volume_state_maintenance( def test_migrate_volume_exception_returns_volume_state_maintenance(
self, _mock_volume_update, _mock_host_passes, self, _mock_volume_update, _mock_backend_passes,
_mock_volume_get): _mock_volume_get):
fake_updates = {'status': 'available', fake_updates = {'status': 'available',
'migration_status': 'error'} 'migration_status': 'error'}
self._test_migrate_volume_exception_returns_volume_state( self._test_migrate_volume_exception_returns_volume_state(
_mock_volume_update, _mock_host_passes, _mock_volume_get, _mock_volume_update, _mock_backend_passes, _mock_volume_get,
'maintenance', fake_updates) 'maintenance', fake_updates)
def _test_migrate_volume_exception_returns_volume_state( def _test_migrate_volume_exception_returns_volume_state(
self, _mock_volume_update, _mock_host_passes, self, _mock_volume_update, _mock_backend_passes,
_mock_volume_get, status, fake_updates): _mock_volume_get, status, fake_updates):
volume = tests_utils.create_volume(self.context, volume = tests_utils.create_volume(self.context,
status=status, status=status,
previous_status='available') previous_status='available')
fake_volume_id = volume.id fake_volume_id = volume.id
request_spec = {'volume_id': fake_volume_id} request_spec = {'volume_id': fake_volume_id}
_mock_host_passes.side_effect = exception.NoValidHost(reason="") _mock_backend_passes.side_effect = exception.NoValidBackend(reason="")
_mock_volume_get.return_value = volume _mock_volume_get.return_value = volume
self.manager.migrate_volume_to_host(self.context, volume, 'host', True, self.manager.migrate_volume_to_host(self.context, volume, 'host', True,
@ -264,15 +264,15 @@ class SchedulerManagerTestCase(test.TestCase):
_mock_volume_update.assert_called_once_with(self.context, _mock_volume_update.assert_called_once_with(self.context,
fake_volume_id, fake_volume_id,
fake_updates) fake_updates)
_mock_host_passes.assert_called_once_with(self.context, 'host', _mock_backend_passes.assert_called_once_with(self.context, 'host',
request_spec, {}) request_spec, {})
@mock.patch('cinder.db.volume_update') @mock.patch('cinder.db.volume_update')
@mock.patch('cinder.db.volume_attachment_get_all_by_volume_id') @mock.patch('cinder.db.volume_attachment_get_all_by_volume_id')
@mock.patch('cinder.quota.QUOTAS.rollback') @mock.patch('cinder.quota.QUOTAS.rollback')
def test_retype_volume_exception_returns_volume_state( def test_retype_volume_exception_returns_volume_state(
self, quota_rollback, _mock_vol_attachment_get, _mock_vol_update): self, quota_rollback, _mock_vol_attachment_get, _mock_vol_update):
# Test NoValidHost exception behavior for retype. # Test NoValidBackend exception behavior for retype.
# Puts the volume in original state and eats the exception. # Puts the volume in original state and eats the exception.
volume = tests_utils.create_volume(self.context, volume = tests_utils.create_volume(self.context,
status='retyping', status='retyping',
@ -287,17 +287,17 @@ class SchedulerManagerTestCase(test.TestCase):
'migration_policy': 'on-demand', 'migration_policy': 'on-demand',
'quota_reservations': reservations} 'quota_reservations': reservations}
_mock_vol_update.return_value = {'status': 'in-use'} _mock_vol_update.return_value = {'status': 'in-use'}
_mock_find_retype_host = mock.Mock( _mock_find_retype_backend = mock.Mock(
side_effect=exception.NoValidHost(reason="")) side_effect=exception.NoValidBackend(reason=""))
orig_retype = self.manager.driver.find_retype_host orig_retype = self.manager.driver.find_retype_backend
self.manager.driver.find_retype_host = _mock_find_retype_host self.manager.driver.find_retype_backend = _mock_find_retype_backend
self.manager.retype(self.context, volume, request_spec=request_spec, self.manager.retype(self.context, volume, request_spec=request_spec,
filter_properties={}) filter_properties={})
_mock_find_retype_host.assert_called_once_with(self.context, _mock_find_retype_backend.assert_called_once_with(self.context,
request_spec, {}, request_spec, {},
'on-demand') 'on-demand')
quota_rollback.assert_called_once_with(self.context, reservations) quota_rollback.assert_called_once_with(self.context, reservations)
_mock_vol_update.assert_called_once_with(self.context, volume.id, _mock_vol_update.assert_called_once_with(self.context, volume.id,
{'status': 'in-use'}) {'status': 'in-use'})
@ -329,7 +329,7 @@ class SchedulerManagerTestCase(test.TestCase):
LOG.exception.reset_mock() LOG.exception.reset_mock()
db.consistencygroup_update.reset_mock() db.consistencygroup_update.reset_mock()
mock_cg.side_effect = exception.NoValidHost( mock_cg.side_effect = exception.NoValidBackend(
reason="No weighed hosts available") reason="No weighed hosts available")
self.manager.create_consistencygroup( self.manager.create_consistencygroup(
self.context, consistencygroup_obj) self.context, consistencygroup_obj)
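
The behaviour these manager tests assert (catch the scheduling failure, flag the resource, swallow the exception) boils down to a pattern like the one below; a hedged sketch only, with schedule_fn and volume_update standing in for whatever the real manager calls.

    from cinder import exception

    def schedule_or_error(schedule_fn, volume_update, context, volume_id,
                          request_spec):
        """Run a scheduling call; on NoValidBackend mark the volume instead
        of re-raising, as the tests above expect."""
        try:
            schedule_fn(context, request_spec)
        except exception.NoValidBackend:
            # Matches the asserted behaviour: the volume ends up in 'error'
            # (or gets migration_status 'error') and nothing propagates.
            volume_update(context, volume_id, {'status': 'error'})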

View File

@ -68,21 +68,21 @@ class VolumeNumberWeigherTestCase(test.TestCase):
weight_properties)[0] weight_properties)[0]
@mock.patch('cinder.db.sqlalchemy.api.service_get_all') @mock.patch('cinder.db.sqlalchemy.api.service_get_all')
def _get_all_hosts(self, _mock_service_get_all, disabled=False): def _get_all_backends(self, _mock_service_get_all, disabled=False):
ctxt = context.get_admin_context() ctxt = context.get_admin_context()
fakes.mock_host_manager_db_calls(_mock_service_get_all, fakes.mock_host_manager_db_calls(_mock_service_get_all,
disabled=disabled) disabled=disabled)
host_states = self.host_manager.get_all_host_states(ctxt) backend_states = self.host_manager.get_all_backend_states(ctxt)
_mock_service_get_all.assert_called_once_with( _mock_service_get_all.assert_called_once_with(
ctxt, ctxt,
None, # backend_match_level None, # backend_match_level
topic=constants.VOLUME_TOPIC, topic=constants.VOLUME_TOPIC,
disabled=disabled) disabled=disabled)
return host_states return backend_states
def test_volume_number_weight_multiplier1(self): def test_volume_number_weight_multiplier1(self):
self.flags(volume_number_multiplier=-1.0) self.flags(volume_number_multiplier=-1.0)
hostinfo_list = self._get_all_hosts() backend_info_list = self._get_all_backends()
# host1: 1 volume Norm=0.0 # host1: 1 volume Norm=0.0
# host2: 2 volumes # host2: 2 volumes
@ -92,14 +92,14 @@ class VolumeNumberWeigherTestCase(test.TestCase):
# so, host1 should win: # so, host1 should win:
with mock.patch.object(api, 'volume_data_get_for_host', with mock.patch.object(api, 'volume_data_get_for_host',
fake_volume_data_get_for_host): fake_volume_data_get_for_host):
weighed_host = self._get_weighed_host(hostinfo_list) weighed_host = self._get_weighed_host(backend_info_list)
self.assertEqual(0.0, weighed_host.weight) self.assertEqual(0.0, weighed_host.weight)
self.assertEqual('host1', self.assertEqual('host1',
utils.extract_host(weighed_host.obj.host)) utils.extract_host(weighed_host.obj.host))
def test_volume_number_weight_multiplier2(self): def test_volume_number_weight_multiplier2(self):
self.flags(volume_number_multiplier=1.0) self.flags(volume_number_multiplier=1.0)
hostinfo_list = self._get_all_hosts() backend_info_list = self._get_all_backends()
# host1: 1 volume Norm=0 # host1: 1 volume Norm=0
# host2: 2 volumes # host2: 2 volumes
@ -109,7 +109,7 @@ class VolumeNumberWeigherTestCase(test.TestCase):
# so, host5 should win: # so, host5 should win:
with mock.patch.object(api, 'volume_data_get_for_host', with mock.patch.object(api, 'volume_data_get_for_host',
fake_volume_data_get_for_host): fake_volume_data_get_for_host):
weighed_host = self._get_weighed_host(hostinfo_list) weighed_host = self._get_weighed_host(backend_info_list)
self.assertEqual(1.0, weighed_host.weight) self.assertEqual(1.0, weighed_host.weight)
self.assertEqual('host5', self.assertEqual('host5',
utils.extract_host(weighed_host.obj.host)) utils.extract_host(weighed_host.obj.host))
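
The two multiplier tests hinge on sign alone: the weigher effectively normalizes each backend's volume count across the candidates and scales it by volume_number_multiplier, so -1.0 favours the emptiest backend (host1) and 1.0 the fullest (host5). A toy illustration of that arithmetic, independent of the real weigher classes:

    def toy_weights(volume_counts, multiplier):
        """Normalize counts to [0, 1] and scale them, mimicking how the
        multiplier combines with the per-backend volume count."""
        lo, hi = min(volume_counts.values()), max(volume_counts.values())
        span = float(hi - lo) or 1.0
        return {host: multiplier * (count - lo) / span
                for host, count in volume_counts.items()}

    counts = {'host1': 1, 'host2': 2, 'host5': 5}
    # multiplier=-1.0: host1 scores 0.0, the rest go negative -> host1 wins.
    # multiplier=1.0:  host5 scores 1.0 -> host5 wins.
    print(toy_weights(counts, -1.0), toy_weights(counts, 1.0))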

View File

@ -203,20 +203,30 @@ class DBAPIServiceTestCase(BaseTest):
def test_service_get_all(self): def test_service_get_all(self):
expired = (datetime.datetime.utcnow() expired = (datetime.datetime.utcnow()
- datetime.timedelta(seconds=CONF.service_down_time + 1)) - datetime.timedelta(seconds=CONF.service_down_time + 1))
db.cluster_create(self.ctxt, {'name': 'cluster_disabled',
'binary': 'fake_binary',
'disabled': True})
db.cluster_create(self.ctxt, {'name': 'cluster_enabled',
'binary': 'fake_binary',
'disabled': False})
values = [ values = [
# Now we are updating updated_at at creation as well so this one # Now we are updating updated_at at creation as well so this one
# is up. # is up.
{'host': 'host1', 'binary': 'b1', 'created_at': expired}, {'host': 'host1', 'binary': 'b1', 'created_at': expired},
{'host': 'host1@ceph', 'binary': 'b2'}, {'host': 'host1@ceph', 'binary': 'b2'},
{'host': 'host2', 'binary': 'b2'}, {'host': 'host2', 'binary': 'b2'},
{'disabled': False, 'cluster_name': 'cluster_enabled'},
{'disabled': True, 'cluster_name': 'cluster_enabled'},
{'disabled': False, 'cluster_name': 'cluster_disabled'},
{'disabled': True, 'cluster_name': 'cluster_disabled'},
{'disabled': True, 'created_at': expired, 'updated_at': expired}, {'disabled': True, 'created_at': expired, 'updated_at': expired},
] ]
services = [self._create_service(vals) for vals in values] services = [self._create_service(vals) for vals in values]
disabled_services = services[-1:] disabled_services = services[-3:]
non_disabled_services = services[:-1] non_disabled_services = services[:-3]
up_services = services[0:3] up_services = services[:7]
down_services = [services[3]] down_services = [services[7]]
expected = services[:2] expected = services[:2]
expected_bin = services[1:3] expected_bin = services[1:3]
compares = [ compares = [

View File

@ -156,15 +156,14 @@ class VolumeRpcAPITestCase(test.TestCase):
if 'host' in expected_msg: if 'host' in expected_msg:
del expected_msg['host'] del expected_msg['host']
if 'dest_host' in expected_msg: if 'dest_backend' in expected_msg:
dest_host = expected_msg.pop('dest_host') dest_backend = expected_msg.pop('dest_backend')
dest_host_dict = {'host': dest_host.host, dest_backend_dict = {'host': dest_backend.host,
'cluster_name': dest_host.cluster_name, 'cluster_name': dest_backend.cluster_name,
'capabilities': dest_host.capabilities} 'capabilities': dest_backend.capabilities}
expected_msg['host'] = dest_host_dict expected_msg['host'] = dest_backend_dict
if 'force_copy' in expected_msg: if 'force_copy' in expected_msg:
expected_msg['force_host_copy'] = expected_msg.pop('force_copy') expected_msg['force_host_copy'] = expected_msg.pop('force_copy')
if 'new_volume' in expected_msg: if 'new_volume' in expected_msg:
volume = expected_msg['new_volume'] volume = expected_msg['new_volume']
expected_msg['new_volume_id'] = volume['id'] expected_msg['new_volume_id'] = volume['id']
@ -544,16 +543,16 @@ class VolumeRpcAPITestCase(test.TestCase):
@mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True)
def test_migrate_volume(self, can_send_version): def test_migrate_volume(self, can_send_version):
class FakeHost(object): class FakeBackend(object):
def __init__(self): def __init__(self):
self.host = 'host' self.host = 'host'
self.cluster_name = 'cluster_name' self.cluster_name = 'cluster_name'
self.capabilities = {} self.capabilities = {}
dest_host = FakeHost() dest_backend = FakeBackend()
self._test_volume_api('migrate_volume', self._test_volume_api('migrate_volume',
rpc_method='cast', rpc_method='cast',
volume=self.fake_volume_obj, volume=self.fake_volume_obj,
dest_host=dest_host, dest_backend=dest_backend,
force_host_copy=True, force_host_copy=True,
version='3.5') version='3.5')
@ -567,17 +566,17 @@ class VolumeRpcAPITestCase(test.TestCase):
@mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True) @mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True)
def test_retype(self, can_send_version): def test_retype(self, can_send_version):
class FakeHost(object): class FakeBackend(object):
def __init__(self): def __init__(self):
self.host = 'host' self.host = 'host'
self.cluster_name = 'cluster_name' self.cluster_name = 'cluster_name'
self.capabilities = {} self.capabilities = {}
dest_host = FakeHost() dest_backend = FakeBackend()
self._test_volume_api('retype', self._test_volume_api('retype',
rpc_method='cast', rpc_method='cast',
volume=self.fake_volume_obj, volume=self.fake_volume_obj,
new_type_id='fake', new_type_id='fake',
dest_host=dest_host, dest_backend=dest_backend,
migration_policy='never', migration_policy='never',
reservations=self.fake_reservations, reservations=self.fake_reservations,
old_reservations=self.fake_reservations, old_reservations=self.fake_reservations,
@ -608,7 +607,7 @@ class VolumeRpcAPITestCase(test.TestCase):
rpc_method='cast', rpc_method='cast',
snapshot=my_fake_snapshot_obj, snapshot=my_fake_snapshot_obj,
ref='foo', ref='foo',
host='fake_host', backend='fake_host',
version='3.0') version='3.0')
def test_freeze_host(self): def test_freeze_host(self):

View File

@ -1164,7 +1164,7 @@ class FlashSystemDriverTestCase(test.TestCase):
# case 3: host name is neither unicode nor string # case 3: host name is neither unicode nor string
conn = {'host': 12345} conn = {'host': 12345}
self.assertRaises(exception.NoValidHost, self.assertRaises(exception.NoValidBackend,
self.driver._connector_to_hostname_prefix, self.driver._connector_to_hostname_prefix,
conn) conn)

View File

@ -350,7 +350,7 @@ class NetAppESeriesDriverTestCase(object):
mock.Mock(side_effect=socket.gaierror)) mock.Mock(side_effect=socket.gaierror))
self.assertRaises( self.assertRaises(
exception.NoValidHost, exception.NoValidBackend,
driver.library._check_mode_get_or_register_storage_system) driver.library._check_mode_get_or_register_storage_system)
def test_setup_error_invalid_first_controller_ip(self): def test_setup_error_invalid_first_controller_ip(self):
@ -361,7 +361,7 @@ class NetAppESeriesDriverTestCase(object):
mock.Mock(side_effect=socket.gaierror)) mock.Mock(side_effect=socket.gaierror))
self.assertRaises( self.assertRaises(
exception.NoValidHost, exception.NoValidBackend,
driver.library._check_mode_get_or_register_storage_system) driver.library._check_mode_get_or_register_storage_system)
def test_setup_error_invalid_second_controller_ip(self): def test_setup_error_invalid_second_controller_ip(self):
@ -372,7 +372,7 @@ class NetAppESeriesDriverTestCase(object):
mock.Mock(side_effect=socket.gaierror)) mock.Mock(side_effect=socket.gaierror))
self.assertRaises( self.assertRaises(
exception.NoValidHost, exception.NoValidBackend,
driver.library._check_mode_get_or_register_storage_system) driver.library._check_mode_get_or_register_storage_system)
def test_setup_error_invalid_both_controller_ips(self): def test_setup_error_invalid_both_controller_ips(self):
@ -383,7 +383,7 @@ class NetAppESeriesDriverTestCase(object):
mock.Mock(side_effect=socket.gaierror)) mock.Mock(side_effect=socket.gaierror))
self.assertRaises( self.assertRaises(
exception.NoValidHost, exception.NoValidBackend,
driver.library._check_mode_get_or_register_storage_system) driver.library._check_mode_get_or_register_storage_system)
def test_manage_existing_get_size(self): def test_manage_existing_get_size(self):

View File

@ -189,7 +189,7 @@ class FlashSystemDriver(san.SanDriver,
msg = _('_create_host: Can not translate host name. Host name ' msg = _('_create_host: Can not translate host name. Host name '
'is not unicode or string.') 'is not unicode or string.')
LOG.error(msg) LOG.error(msg)
raise exception.NoValidHost(reason=msg) raise exception.NoValidBackend(reason=msg)
host_name = six.text_type(host_name) host_name = six.text_type(host_name)

View File

@ -259,7 +259,7 @@ class NetAppESeriesLibrary(object):
except socket.gaierror as e: except socket.gaierror as e:
LOG.error(_LE('Error resolving host %(host)s. Error - %(e)s.'), LOG.error(_LE('Error resolving host %(host)s. Error - %(e)s.'),
{'host': host, 'e': e}) {'host': host, 'e': e})
raise exception.NoValidHost( raise exception.NoValidBackend(
_("Controller IP '%(host)s' could not be resolved: %(e)s.") _("Controller IP '%(host)s' could not be resolved: %(e)s.")
% {'host': host, 'e': e}) % {'host': host, 'e': e})

View File

@ -244,10 +244,10 @@ class VolumeAPI(rpc.RPCAPI):
cctxt.cast(ctxt, 'extend_volume', volume=volume, new_size=new_size, cctxt.cast(ctxt, 'extend_volume', volume=volume, new_size=new_size,
reservations=reservations) reservations=reservations)
def migrate_volume(self, ctxt, volume, dest_host, force_host_copy): def migrate_volume(self, ctxt, volume, dest_backend, force_host_copy):
backend_p = {'host': dest_host.host, backend_p = {'host': dest_backend.host,
'cluster_name': dest_host.cluster_name, 'cluster_name': dest_backend.cluster_name,
'capabilities': dest_host.capabilities} 'capabilities': dest_backend.capabilities}
version = '3.5' version = '3.5'
if not self.client.can_send_version(version): if not self.client.can_send_version(version):
@ -263,12 +263,12 @@ class VolumeAPI(rpc.RPCAPI):
return cctxt.call(ctxt, 'migrate_volume_completion', volume=volume, return cctxt.call(ctxt, 'migrate_volume_completion', volume=volume,
new_volume=new_volume, error=error,) new_volume=new_volume, error=error,)
def retype(self, ctxt, volume, new_type_id, dest_host, def retype(self, ctxt, volume, new_type_id, dest_backend,
migration_policy='never', reservations=None, migration_policy='never', reservations=None,
old_reservations=None): old_reservations=None):
backend_p = {'host': dest_host.host, backend_p = {'host': dest_backend.host,
'cluster_name': dest_host.cluster_name, 'cluster_name': dest_backend.cluster_name,
'capabilities': dest_host.capabilities} 'capabilities': dest_backend.capabilities}
version = '3.5' version = '3.5'
if not self.client.can_send_version(version): if not self.client.can_send_version(version):
version = '3.0' version = '3.0'
@ -308,8 +308,8 @@ class VolumeAPI(rpc.RPCAPI):
cctxt.cast(ctxt, 'failover_host', cctxt.cast(ctxt, 'failover_host',
secondary_backend_id=secondary_backend_id) secondary_backend_id=secondary_backend_id)
def manage_existing_snapshot(self, ctxt, snapshot, ref, host): def manage_existing_snapshot(self, ctxt, snapshot, ref, backend):
cctxt = self._get_cctxt(host) cctxt = self._get_cctxt(backend)
cctxt.cast(ctxt, 'manage_existing_snapshot', cctxt.cast(ctxt, 'manage_existing_snapshot',
snapshot=snapshot, snapshot=snapshot,
ref=ref) ref=ref)
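
For callers, the renamed dest_backend argument only needs host, cluster_name and capabilities attributes, just like the FakeBackend helper in the RPC tests above. A minimal caller-side sketch, assuming a configured RPC layer and a loaded volume object; DestBackend and request_migration are illustrative names:

    from cinder.volume import rpcapi as volume_rpcapi

    class DestBackend(object):
        """Duck-typed destination, mirroring FakeBackend in the tests."""
        def __init__(self, host, cluster_name=None, capabilities=None):
            self.host = host
            self.cluster_name = cluster_name
            self.capabilities = capabilities or {}

    def request_migration(ctxt, volume):
        api = volume_rpcapi.VolumeAPI()
        dest = DestBackend('hostB@lvm')
        # host/cluster_name/capabilities are packed into the 'host' argument
        # on the wire, as shown in migrate_volume above.
        api.migrate_volume(ctxt, volume, dest, force_host_copy=False)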