Continue renaming volume_utils (core)

Now that volume_utils has been renamed, import it
and use it consistently everywhere.

Change-Id: I6a74f664ff890ff3f24f715a1e93df7e0384aa6b
Author: Eric Harney
Date: 2019-09-05 11:37:42 -04:00
Parent: de789648e5
Commit: ca5c2ce4e8

13 changed files with 128 additions and 109 deletions
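Every hunk below applies the same mechanical change: the per-module aliases (vutils, vol_utils, volutils, or a shadowing utils) become one unaliased volume_utils import, and call sites are re-wrapped where the longer name no longer fits the line. A minimal sketch of the before/after shape, reusing the cinder-manage hunk from the first file (host_of is an illustrative wrapper, not code from the commit):

# Before: each module picked its own alias.
#     from cinder.volume import volume_utils as vutils
#     host = vutils.extract_host(volume.host) if volume.host else None
# After: one consistent, unaliased name at every call site.
from cinder.volume import volume_utils


def host_of(volume):
    return volume_utils.extract_host(volume.host) if volume.host else None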

cinder/cmd/manage.py

@@ -85,7 +85,7 @@ from cinder import rpc
 from cinder.scheduler import rpcapi as scheduler_rpcapi
 from cinder import version
 from cinder.volume import rpcapi as volume_rpcapi
-from cinder.volume import volume_utils as vutils
+from cinder.volume import volume_utils
 CONF = cfg.CONF
@@ -444,7 +444,7 @@ class VolumeCommands(object):
         """Delete a volume, bypassing the check that it must be available."""
         ctxt = context.get_admin_context()
         volume = objects.Volume.get_by_id(ctxt, volume_id)
-        host = vutils.extract_host(volume.host) if volume.host else None
+        host = volume_utils.extract_host(volume.host) if volume.host else None
         if not host:
             print(_("Volume not yet assigned to host."))

cinder/db/sqlalchemy/api.py

@@ -66,7 +66,7 @@ from cinder.i18n import _
 from cinder import objects
 from cinder.objects import fields
 from cinder import utils
-from cinder.volume import volume_utils as vol_utils
+from cinder.volume import volume_utils
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
@@ -577,10 +577,10 @@ def is_backend_frozen(context, host, cluster_name):
     """Check if a storage backend is frozen based on host and cluster_name."""
     if cluster_name:
         model = models.Cluster
-        conditions = [model.name == vol_utils.extract_host(cluster_name)]
+        conditions = [model.name == volume_utils.extract_host(cluster_name)]
     else:
         model = models.Service
-        conditions = [model.host == vol_utils.extract_host(host)]
+        conditions = [model.host == volume_utils.extract_host(host)]
     conditions.extend((~model.deleted, model.frozen))
     query = get_session().query(sql.exists().where(and_(*conditions)))
     frozen = query.scalar()

cinder/group/api.py

@@ -40,7 +40,7 @@ from cinder.scheduler import rpcapi as scheduler_rpcapi
 from cinder.volume import api as volume_api
 from cinder.volume import rpcapi as volume_rpcapi
 from cinder.volume import volume_types
-from cinder.volume import volume_utils as vol_utils
+from cinder.volume import volume_utils
 CONF = cfg.CONF
@@ -785,8 +785,8 @@ class API(base.Base):
             # group.host and add_vol_ref['host'] are in this format:
             # 'host@backend#pool'. Extract host (host@backend) before
             # doing comparison.
-            vol_host = vol_utils.extract_host(add_vol_ref['host'])
-            group_host = vol_utils.extract_host(group.host)
+            vol_host = volume_utils.extract_host(add_vol_ref['host'])
+            group_host = volume_utils.extract_host(group.host)
             if group_host != vol_host:
                 raise exception.InvalidVolume(
                     reason=_("Volume is not local to this node."))
@@ -956,7 +956,7 @@ class API(base.Base):
             raise exception.InvalidGroupType(reason=msg)
         for vol_type in group.volume_types:
-            if not vol_utils.is_replicated_spec(vol_type.extra_specs):
+            if not volume_utils.is_replicated_spec(vol_type.extra_specs):
                 msg = _("Volume type %s does not have 'replication_enabled' "
                         "spec key set to '<is> True'.") % vol_type.id
                 LOG.error(msg)
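A note on the helper these comparisons depend on: the comment above describes hosts of the form 'host@backend#pool', and extract_host selects one level out of that string. A hedged sketch of the expected behavior, assuming the helper's long-standing semantics (this commit only renames its module):

from cinder.volume import volume_utils

host = 'host1@lvm#pool0'
assert volume_utils.extract_host(host, 'host') == 'host1'
assert volume_utils.extract_host(host) == 'host1@lvm'  # default level 'backend'
assert volume_utils.extract_host(host, 'pool') == 'pool0'
# With no pool encoded, the pool level yields None, or the default pool
# name when explicitly requested (the '_pool0' seen in the tests below).
assert volume_utils.extract_host('host1@lvm', 'pool') is None
assert volume_utils.extract_host('host1@lvm', 'pool', True) == '_pool0'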

cinder/scheduler/filter_scheduler.py

@@ -28,7 +28,7 @@ from cinder import exception
 from cinder.i18n import _
 from cinder.scheduler import driver
 from cinder.scheduler import scheduler_options
-from cinder.volume import volume_utils as utils
+from cinder.volume import volume_utils
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
@@ -117,11 +117,11 @@ class FilterScheduler(driver.Scheduler):
         weighed_backends = self._get_weighted_candidates(context, request_spec,
                                                          filter_properties)
         # If backend has no pool defined we will ignore it in the comparison
-        ignore_pool = not bool(utils.extract_host(backend, 'pool'))
+        ignore_pool = not bool(volume_utils.extract_host(backend, 'pool'))
         for weighed_backend in weighed_backends:
             backend_id = weighed_backend.obj.backend_id
             if ignore_pool:
-                backend_id = utils.extract_host(backend_id)
+                backend_id = volume_utils.extract_host(backend_id)
             if backend_id == backend:
                 return weighed_backend.obj
@@ -160,7 +160,7 @@ class FilterScheduler(driver.Scheduler):
             if backend_state.backend_id == backend:
                 return backend_state
-        if utils.extract_host(backend, 'pool') is None:
+        if volume_utils.extract_host(backend, 'pool') is None:
             # legacy volumes created before pool is introduced has no pool
             # info in host. But host_state.host always include pool level
             # info. In this case if above exact match didn't work out, we
@@ -171,8 +171,9 @@ class FilterScheduler(driver.Scheduler):
             # to happen even migration policy is 'never'.
             for weighed_backend in weighed_backends:
                 backend_state = weighed_backend.obj
-                new_backend = utils.extract_host(backend_state.backend_id,
-                                                 'backend')
+                new_backend = volume_utils.extract_host(
+                    backend_state.backend_id,
+                    'backend')
                 if new_backend == backend:
                     return backend_state
@@ -447,8 +448,8 @@ class FilterScheduler(driver.Scheduler):
             for backend2 in backend_list2:
                 # Should schedule creation of group on backend level,
                 # not pool level.
-                if (utils.extract_host(backend1.obj.backend_id) ==
-                        utils.extract_host(backend2.obj.backend_id)):
+                if (volume_utils.extract_host(backend1.obj.backend_id) ==
+                        volume_utils.extract_host(backend2.obj.backend_id)):
                     new_backends.append(backend1)
         if not new_backends:
             return []
@@ -526,14 +527,14 @@ class FilterScheduler(driver.Scheduler):
         # snapshot or volume).
         resource_backend = request_spec.get('resource_backend')
         if weighed_backends and resource_backend:
-            resource_backend_has_pool = bool(utils.extract_host(
+            resource_backend_has_pool = bool(volume_utils.extract_host(
                 resource_backend, 'pool'))
             # Get host name including host@backend#pool info from
             # weighed_backends.
             for backend in weighed_backends[::-1]:
                 backend_id = (
                     backend.obj.backend_id if resource_backend_has_pool
-                    else utils.extract_host(backend.obj.backend_id)
+                    else volume_utils.extract_host(backend.obj.backend_id)
                 )
                 if backend_id != resource_backend:
                     weighed_backends.remove(backend)

cinder/scheduler/host_manager.py

@@ -35,7 +35,7 @@ from cinder import objects
 from cinder.scheduler import filters
 from cinder import utils
 from cinder.volume import volume_types
-from cinder.volume import volume_utils as vol_utils
+from cinder.volume import volume_utils
 # FIXME: This file should be renamed to backend_manager, we should also rename
@@ -246,7 +246,7 @@ class BackendState(object):
             pool_name = self.volume_backend_name
             if pool_name is None:
                 # To get DEFAULT_POOL_NAME
-                pool_name = vol_utils.extract_host(self.host, 'pool', True)
+                pool_name = volume_utils.extract_host(self.host, 'pool', True)
         if len(self.pools) == 0:
             # No pool was there
@@ -349,8 +349,8 @@ class BackendState(object):
 class PoolState(BackendState):
     def __init__(self, host, cluster_name, capabilities, pool_name):
-        new_host = vol_utils.append_host(host, pool_name)
-        new_cluster = vol_utils.append_host(cluster_name, pool_name)
+        new_host = volume_utils.append_host(host, pool_name)
+        new_cluster = volume_utils.append_host(cluster_name, pool_name)
         super(PoolState, self).__init__(new_host, new_cluster, capabilities)
         self.pool_name = pool_name
         # No pools in pool
@@ -726,7 +726,8 @@ class HostManager(object):
                     filtered = False
                 pool = state.pools[key]
                 # use backend_key.pool_name to make sure key is unique
-                pool_key = vol_utils.append_host(backend_key, pool.pool_name)
+                pool_key = volume_utils.append_host(backend_key,
+                                                    pool.pool_name)
                 new_pool = dict(name=pool_key)
                 new_pool.update(dict(capabilities=pool.capabilities))
@@ -871,7 +872,7 @@ class HostManager(object):
     def _notify_capacity_usage(self, context, usage):
         if usage:
             for u in usage:
-                vol_utils.notify_about_capacity_usage(
+                volume_utils.notify_about_capacity_usage(
                     context, u, u['type'], None, None)
         LOG.debug("Publish storage capacity: %s.", usage)
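PoolState and the pool-key bookkeeping above build host strings with append_host, the inverse of extract_host's pool parsing. A short sketch of the assumed behavior, again unchanged by this commit:

from cinder.volume import volume_utils

assert volume_utils.append_host('host1@lvm', 'pool0') == 'host1@lvm#pool0'
# Missing pieces pass through: no pool leaves the host untouched, and a
# None host (e.g. PoolState's cluster_name) stays None.
assert volume_utils.append_host('host1@lvm', None) == 'host1@lvm'
assert volume_utils.append_host(None, 'pool0') is None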

cinder/tests/unit/scheduler/fakes.py

@@ -22,7 +22,7 @@ from oslo_utils import uuidutils
 from cinder.scheduler import filter_scheduler
 from cinder.scheduler import host_manager
-from cinder.volume import volume_utils as utils
+from cinder.volume import volume_utils
 UTC_NOW = timeutils.utcnow()
@@ -301,7 +301,8 @@ def mock_host_manager_db_calls(mock_obj, backends_with_pools=False,
         {
             'id': sid,
             'host': svc,
-            'availability_zone': az_map[utils.extract_host(svc, 'host')],
+            'availability_zone': az_map[volume_utils.extract_host(svc,
+                                                                  'host')],
             'topic': 'volume',
             'disabled': False,
             'updated_at': timeutils.utcnow(),

cinder/tests/unit/scheduler/test_allocated_capacity_weigher.py

@@ -24,7 +24,7 @@ from cinder import context
 from cinder.scheduler import weights
 from cinder import test
 from cinder.tests.unit.scheduler import fakes
-from cinder.volume import volume_utils as utils
+from cinder.volume import volume_utils
 class AllocatedCapacityWeigherTestCase(test.TestCase):
@@ -66,7 +66,7 @@ class AllocatedCapacityWeigherTestCase(test.TestCase):
         weighed_host = self._get_weighed_host(hostinfo_list)
         self.assertEqual(0.0, weighed_host.weight)
         self.assertEqual(
-            'host1', utils.extract_host(weighed_host.obj.host))
+            'host1', volume_utils.extract_host(weighed_host.obj.host))
     def test_capacity_weight_multiplier1(self):
         self.flags(allocated_capacity_weight_multiplier=1.0)
@@ -82,7 +82,7 @@ class AllocatedCapacityWeigherTestCase(test.TestCase):
         weighed_host = self._get_weighed_host(hostinfo_list)
         self.assertEqual(1.0, weighed_host.weight)
         self.assertEqual(
-            'host4', utils.extract_host(weighed_host.obj.host))
+            'host4', volume_utils.extract_host(weighed_host.obj.host))
     def test_capacity_weight_multiplier2(self):
         self.flags(allocated_capacity_weight_multiplier=-2.0)
@@ -98,4 +98,4 @@ class AllocatedCapacityWeigherTestCase(test.TestCase):
         weighed_host = self._get_weighed_host(hostinfo_list)
         self.assertEqual(0.0, weighed_host.weight)
         self.assertEqual(
-            'host1', utils.extract_host(weighed_host.obj.host))
+            'host1', volume_utils.extract_host(weighed_host.obj.host))

cinder/tests/unit/scheduler/test_capacity_weigher.py

@@ -25,7 +25,7 @@ from cinder import context
 from cinder.scheduler import weights
 from cinder import test
 from cinder.tests.unit.scheduler import fakes
-from cinder.volume import volume_utils as utils
+from cinder.volume import volume_utils
 @ddt.ddt
@@ -109,7 +109,8 @@ class CapacityWeigherTestCase(test.TestCase):
             backend_info_list,
             weight_properties=weight_properties)[0]
         self.assertEqual(1.0, weighed_host.weight)
-        self.assertEqual(winner, utils.extract_host(weighed_host.obj.host))
+        self.assertEqual(winner,
+                         volume_utils.extract_host(weighed_host.obj.host))
     @ddt.data(
         {'volume_type': {'extra_specs': {'provisioning:type': 'thin'}},
@@ -158,7 +159,8 @@ class CapacityWeigherTestCase(test.TestCase):
             weight_properties=weight_properties)
         weighed_host = weighed_host[0]
         self.assertEqual(0.0, weighed_host.weight)
-        self.assertEqual(winner, utils.extract_host(weighed_host.obj.host))
+        self.assertEqual(winner,
+                         volume_utils.extract_host(weighed_host.obj.host))
     @ddt.data(
         {'volume_type': {'extra_specs': {'provisioning:type': 'thin'}},
@@ -206,7 +208,8 @@ class CapacityWeigherTestCase(test.TestCase):
             backend_info_list,
             weight_properties=weight_properties)[0]
         self.assertEqual(1.0 * 2, weighed_host.weight)
-        self.assertEqual(winner, utils.extract_host(weighed_host.obj.host))
+        self.assertEqual(winner,
+                         volume_utils.extract_host(weighed_host.obj.host))
     def test_capacity_weight_no_unknown_or_infinite(self):
         self.flags(capacity_weight_multiplier=-1.0)
@@ -233,11 +236,13 @@ class CapacityWeigherTestCase(test.TestCase):
         weighed_hosts = self._get_weighed_hosts(backend_info_list)
         best_host = weighed_hosts[0]
         self.assertEqual(0.0, best_host.weight)
-        self.assertEqual('host4', utils.extract_host(best_host.obj.host))
+        self.assertEqual('host4',
+                         volume_utils.extract_host(best_host.obj.host))
         # and host2 is the worst:
         worst_host = weighed_hosts[-1]
         self.assertEqual(-1.0, worst_host.weight)
-        self.assertEqual('host2', utils.extract_host(worst_host.obj.host))
+        self.assertEqual('host2',
+                         volume_utils.extract_host(worst_host.obj.host))
     def test_capacity_weight_free_unknown(self):
         self.flags(capacity_weight_multiplier=-1.0)
@@ -275,11 +280,13 @@ class CapacityWeigherTestCase(test.TestCase):
         weighed_hosts = self._get_weighed_hosts(backend_info_list)
         best_host = weighed_hosts[0]
         self.assertEqual(0.0, best_host.weight)
-        self.assertEqual('host4', utils.extract_host(best_host.obj.host))
+        self.assertEqual('host4',
+                         volume_utils.extract_host(best_host.obj.host))
         # and host5 is the worst:
         worst_host = weighed_hosts[-1]
         self.assertEqual(-1.0, worst_host.weight)
-        self.assertEqual('host5', utils.extract_host(worst_host.obj.host))
+        self.assertEqual('host5',
+                         volume_utils.extract_host(worst_host.obj.host))
     def test_capacity_weight_cap_unknown(self):
         self.flags(capacity_weight_multiplier=-1.0)
@@ -317,11 +324,13 @@ class CapacityWeigherTestCase(test.TestCase):
         weighed_hosts = self._get_weighed_hosts(backend_info_list)
         best_host = weighed_hosts[0]
         self.assertEqual(0.0, best_host.weight)
-        self.assertEqual('host4', utils.extract_host(best_host.obj.host))
+        self.assertEqual('host4',
+                         volume_utils.extract_host(best_host.obj.host))
         # and host5 is the worst:
         worst_host = weighed_hosts[-1]
         self.assertEqual(-1.0, worst_host.weight)
-        self.assertEqual('host5', utils.extract_host(worst_host.obj.host))
+        self.assertEqual('host5',
+                         volume_utils.extract_host(worst_host.obj.host))
     def test_capacity_weight_free_infinite(self):
         self.flags(capacity_weight_multiplier=-1.0)
@@ -359,11 +368,13 @@ class CapacityWeigherTestCase(test.TestCase):
         weighed_hosts = self._get_weighed_hosts(backend_info_list)
         best_host = weighed_hosts[0]
         self.assertEqual(0.0, best_host.weight)
-        self.assertEqual('host4', utils.extract_host(best_host.obj.host))
+        self.assertEqual('host4',
+                         volume_utils.extract_host(best_host.obj.host))
         # and host5 is the worst:
         worst_host = weighed_hosts[-1]
         self.assertEqual(-1.0, worst_host.weight)
-        self.assertEqual('host5', utils.extract_host(worst_host.obj.host))
+        self.assertEqual('host5',
+                         volume_utils.extract_host(worst_host.obj.host))
     def test_capacity_weight_cap_infinite(self):
         self.flags(capacity_weight_multiplier=-1.0)
@@ -401,8 +412,10 @@ class CapacityWeigherTestCase(test.TestCase):
         weighed_hosts = self._get_weighed_hosts(backend_info_list)
         best_host = weighed_hosts[0]
         self.assertEqual(0.0, best_host.weight)
-        self.assertEqual('host4', utils.extract_host(best_host.obj.host))
+        self.assertEqual('host4',
+                         volume_utils.extract_host(best_host.obj.host))
         # and host5 is the worst:
         worst_host = weighed_hosts[-1]
         self.assertEqual(-1.0, worst_host.weight)
-        self.assertEqual('host5', utils.extract_host(worst_host.obj.host))
+        self.assertEqual('host5',
+                         volume_utils.extract_host(worst_host.obj.host))

cinder/tests/unit/scheduler/test_filter_scheduler.py

@@ -27,7 +27,7 @@ from cinder.scheduler import host_manager
 from cinder.tests.unit import fake_constants as fake
 from cinder.tests.unit.scheduler import fakes
 from cinder.tests.unit.scheduler import test_scheduler
-from cinder.volume import volume_utils as utils
+from cinder.volume import volume_utils
 @ddt.ddt
@@ -379,7 +379,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         request_spec = objects.RequestSpec.from_primitives(request_spec)
         ret_host = sched.backend_passes_filters(ctx, 'host1#lvm1',
                                                 request_spec, {})
-        self.assertEqual('host1', utils.extract_host(ret_host.host))
+        self.assertEqual('host1', volume_utils.extract_host(ret_host.host))
         self.assertTrue(_mock_service_get_topic.called)
     @mock.patch('cinder.db.service_get_all')
@@ -395,7 +395,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         request_spec = objects.RequestSpec.from_primitives(request_spec)
         ret_host = sched.backend_passes_filters(ctx, 'host5#_pool0',
                                                 request_spec, {})
-        self.assertEqual('host5', utils.extract_host(ret_host.host))
+        self.assertEqual('host5', volume_utils.extract_host(ret_host.host))
         self.assertTrue(_mock_service_get_topic.called)
     @mock.patch('cinder.db.service_get_all')
@@ -408,7 +408,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
             'size': 1}}
         request_spec = objects.RequestSpec.from_primitives(request_spec)
         ret_host = sched.backend_passes_filters(ctx, 'host1', request_spec, {})
-        self.assertEqual('host1', utils.extract_host(ret_host.host))
+        self.assertEqual('host1', volume_utils.extract_host(ret_host.host))
         self.assertTrue(mock_service_get_all.called)
     @mock.patch('cinder.db.service_get_all')
@@ -481,7 +481,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         host_state = sched.find_retype_backend(ctx, request_spec,
                                                filter_properties={},
                                                migration_policy='never')
-        self.assertEqual('host4', utils.extract_host(host_state.host))
+        self.assertEqual('host4', volume_utils.extract_host(host_state.host))
     @mock.patch('cinder.db.service_get_all')
     def test_retype_with_pool_policy_never_migrate_pass(
@@ -540,7 +540,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
         host_state = sched.find_retype_backend(ctx, request_spec,
                                                filter_properties={},
                                                migration_policy='on-demand')
-        self.assertEqual('host1', utils.extract_host(host_state.host))
+        self.assertEqual('host1', volume_utils.extract_host(host_state.host))
     @mock.patch('cinder.db.service_get_all')
     def test_retype_policy_demand_migrate_fail(self, _mock_service_get_topic):

cinder/tests/unit/volume/test_init_host.py

@@ -26,7 +26,7 @@ from cinder.tests.unit import utils as tests_utils
 from cinder.tests.unit import volume as base
 from cinder.volume import driver
 from cinder.volume import volume_migration as volume_migration
-from cinder.volume import volume_utils as volutils
+from cinder.volume import volume_utils
 CONF = cfg.CONF
@@ -44,16 +44,16 @@ class VolumeInitHostTestCase(base.BaseVolumeTestCase):
             self.context, size=100, host=CONF.host)
         vol1 = tests_utils.create_volume(
             self.context, size=128,
-            host=volutils.append_host(CONF.host, 'pool0'))
+            host=volume_utils.append_host(CONF.host, 'pool0'))
         vol2 = tests_utils.create_volume(
             self.context, size=256,
-            host=volutils.append_host(CONF.host, 'pool0'))
+            host=volume_utils.append_host(CONF.host, 'pool0'))
         vol3 = tests_utils.create_volume(
             self.context, size=512,
-            host=volutils.append_host(CONF.host, 'pool1'))
+            host=volume_utils.append_host(CONF.host, 'pool1'))
         vol4 = tests_utils.create_volume(
             self.context, size=1024,
-            host=volutils.append_host(CONF.host, 'pool2'))
+            host=volume_utils.append_host(CONF.host, 'pool2'))
         self.volume.init_host(service_id=self.service_id)
         init_host_mock.assert_called_once_with(
             service_id=self.service_id, added_to_cluster=None)
@@ -74,7 +74,7 @@ class VolumeInitHostTestCase(base.BaseVolumeTestCase):
         # to be volume_backend_name or None
         vol0.refresh()
-        expected_host = volutils.append_host(CONF.host, 'fake')
+        expected_host = volume_utils.append_host(CONF.host, 'fake')
         self.assertEqual(expected_host, vol0.host)
         self.volume.delete_volume(self.context, vol0)
         self.volume.delete_volume(self.context, vol1)
@@ -100,24 +100,24 @@ class VolumeInitHostTestCase(base.BaseVolumeTestCase):
             cluster_name=cluster_name)
         tests_utils.create_volume(
             self.context, size=128, cluster_name=cluster_name,
-            host=volutils.append_host(CONF.host, 'pool0'))
+            host=volume_utils.append_host(CONF.host, 'pool0'))
         tests_utils.create_volume(
             self.context, size=256, cluster_name=cluster_name,
-            host=volutils.append_host(CONF.host + '2', 'pool0'))
+            host=volume_utils.append_host(CONF.host + '2', 'pool0'))
         tests_utils.create_volume(
             self.context, size=512, cluster_name=cluster_name,
-            host=volutils.append_host(CONF.host + '2', 'pool1'))
+            host=volume_utils.append_host(CONF.host + '2', 'pool1'))
         tests_utils.create_volume(
             self.context, size=1024, cluster_name=cluster_name,
-            host=volutils.append_host(CONF.host + '3', 'pool2'))
+            host=volume_utils.append_host(CONF.host + '3', 'pool2'))
         # These don't belong to the cluster so they will be ignored
         tests_utils.create_volume(
             self.context, size=1024,
-            host=volutils.append_host(CONF.host, 'pool2'))
+            host=volume_utils.append_host(CONF.host, 'pool2'))
         tests_utils.create_volume(
             self.context, size=1024, cluster_name=cluster_name + '1',
-            host=volutils.append_host(CONF.host + '3', 'pool2'))
+            host=volume_utils.append_host(CONF.host + '3', 'pool2'))
         self.volume.init_host(service_id=self.service_id)
         init_host_mock.assert_called_once_with(

cinder/tests/unit/volume/test_volume_migration.py

@@ -41,7 +41,7 @@ from cinder.volume import api as volume_api
 from cinder.volume.flows.manager import create_volume as create_volume_manager
 from cinder.volume import rpcapi as volume_rpcapi
 from cinder.volume import volume_types
-from cinder.volume import volume_utils as volutils
+from cinder.volume import volume_utils
 QUOTAS = quota.QUOTAS
@@ -556,7 +556,7 @@ class VolumeMigrationTestCase(base.BaseVolumeTestCase):
             mock.patch.object(os_brick.initiator.connector,
                               'get_connector_properties') \
                 as mock_get_connector_properties, \
-            mock.patch.object(volutils, 'copy_volume') as mock_copy, \
+            mock.patch.object(volume_utils, 'copy_volume') as mock_copy, \
             mock.patch.object(volume_rpcapi.VolumeAPI,
                               'get_capabilities') \
                 as mock_get_capabilities:

cinder/volume/flows/api/create_volume.py

@@ -30,7 +30,7 @@ from cinder import quota_utils
 from cinder import utils
 from cinder.volume.flows import common
 from cinder.volume import volume_types
-from cinder.volume import volume_utils as vol_utils
+from cinder.volume import volume_utils
 LOG = logging.getLogger(__name__)
@@ -223,7 +223,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
         # exist, this is expected as it signals that the image_id is missing.
         image_meta = self.image_service.show(context, image_id)
-        vol_utils.check_image_metadata(image_meta, size)
+        volume_utils.check_image_metadata(image_meta, size)
         return image_meta
@@ -237,7 +237,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
         the validated availability zone.
         """
         refresh_az = False
-        type_azs = vol_utils.extract_availability_zones_from_volume_type(
+        type_azs = volume_utils.extract_availability_zones_from_volume_type(
             volume_type)
         type_az_configured = type_azs is not None
         if type_az_configured:
@@ -339,12 +339,12 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
             # Clone the existing key and associate a separate -- but
             # identical -- key with each volume.
             if encryption_key_id is not None:
-                encryption_key_id = vol_utils.clone_encryption_key(
+                encryption_key_id = volume_utils.clone_encryption_key(
                     context,
                     key_manager,
                     encryption_key_id)
             else:
-                encryption_key_id = vol_utils.create_encryption_key(
+                encryption_key_id = volume_utils.create_encryption_key(
                     context,
                     key_manager,
                     volume_type_id)
@@ -456,7 +456,7 @@ class ExtractVolumeRequestTask(flow_utils.CinderTask):
                 specs = None
             extra_specs = None
-        if vol_utils.is_replicated_spec(extra_specs):
+        if volume_utils.is_replicated_spec(extra_specs):
             replication_status = fields.ReplicationStatus.ENABLED
         else:
             replication_status = fields.ReplicationStatus.DISABLED
@@ -740,13 +740,13 @@ class VolumeCastTask(flow_utils.CinderTask):
            # If cgroup_id existed, we should cast volume to the scheduler
            # to choose a proper pool whose backend is same as CG's backend.
            cgroup = objects.ConsistencyGroup.get_by_id(context, cgroup_id)
-           request_spec['resource_backend'] = vol_utils.extract_host(
+           request_spec['resource_backend'] = volume_utils.extract_host(
                cgroup.resource_backend)
        elif group_id:
            # If group_id exists, we should cast volume to the scheduler
            # to choose a proper pool whose backend is same as group's backend.
            group = objects.Group.get_by_id(context, group_id)
-           request_spec['resource_backend'] = vol_utils.extract_host(
+           request_spec['resource_backend'] = volume_utils.extract_host(
                group.resource_backend)
        elif snapshot_id and CONF.snapshot_same_host:
            # NOTE(Rongze Zhu): A simple solution for bug 1008866.

cinder/volume/manager.py

@@ -84,7 +84,7 @@ from cinder.volume import group_types
 from cinder.volume import rpcapi as volume_rpcapi
 from cinder.volume import volume_migration
 from cinder.volume import volume_types
-from cinder.volume import volume_utils as vol_utils
+from cinder.volume import volume_utils
 LOG = logging.getLogger(__name__)
@@ -205,7 +205,7 @@ class VolumeManager(manager.CleanableManager,
     def _get_service(self, host=None, binary=constants.VOLUME_BINARY):
         host = host or self.host
         ctxt = context.get_admin_context()
-        svc_host = vol_utils.extract_host(host, 'backend')
+        svc_host = volume_utils.extract_host(host, 'backend')
         return objects.Service.get_by_args(ctxt, svc_host, binary)
     def __init__(self, volume_driver=None, service_name=None,
@@ -315,7 +315,7 @@ class VolumeManager(manager.CleanableManager,
         self.image_volume_cache = None
     def _count_allocated_capacity(self, ctxt, volume):
-        pool = vol_utils.extract_host(volume['host'], 'pool')
+        pool = volume_utils.extract_host(volume['host'], 'pool')
         if pool is None:
             # No pool name encoded in host, so this is a legacy
             # volume created before pool is introduced, ask
@@ -329,8 +329,8 @@ class VolumeManager(manager.CleanableManager,
                 return
             if pool:
-                new_host = vol_utils.append_host(volume['host'],
-                                                 pool)
+                new_host = volume_utils.append_host(volume['host'],
+                                                    pool)
                 self.db.volume_update(ctxt, volume['id'],
                                       {'host': new_host})
             else:
@@ -339,7 +339,7 @@ class VolumeManager(manager.CleanableManager,
                 # volume_backend_name is None, use default pool name.
                 # This is only for counting purpose, doesn't update DB.
                 pool = (self.driver.configuration.safe_get(
-                    'volume_backend_name') or vol_utils.extract_host(
+                    'volume_backend_name') or volume_utils.extract_host(
                         volume['host'], 'pool', True))
         try:
             pool_stat = self.stats['pools'][pool]
@@ -563,7 +563,7 @@ class VolumeManager(manager.CleanableManager,
         self.driver.set_initialized()
         # Keep the image tmp file clean when init host.
-        backend_name = vol_utils.extract_host(self.service_topic_queue)
+        backend_name = volume_utils.extract_host(self.service_topic_queue)
         image_utils.cleanup_temporary_file(backend_name)
         # Migrate any ConfKeyManager keys based on fixed_key to the currently
@@ -669,9 +669,10 @@ class VolumeManager(manager.CleanableManager,
     def _set_resource_host(self, resource):
         """Set the host field on the DB to our own when we are clustered."""
         if (resource.is_clustered and
-                not vol_utils.hosts_are_equivalent(resource.host, self.host)):
-            pool = vol_utils.extract_host(resource.host, 'pool')
-            resource.host = vol_utils.append_host(self.host, pool)
+                not volume_utils.hosts_are_equivalent(resource.host,
+                                                      self.host)):
+            pool = volume_utils.extract_host(resource.host, 'pool')
+            resource.host = volume_utils.append_host(self.host, pool)
             resource.save()
     @objects.Volume.set_workers
@@ -785,8 +786,9 @@ class VolumeManager(manager.CleanableManager,
     def _check_is_our_resource(self, resource):
         if resource.host:
-            res_backend = vol_utils.extract_host(resource.service_topic_queue)
-            backend = vol_utils.extract_host(self.service_topic_queue)
+            res_backend = volume_utils.extract_host(
+                resource.service_topic_queue)
+            backend = volume_utils.extract_host(self.service_topic_queue)
             if res_backend != backend:
                 msg = (_('Invalid %(resource)s: %(resource)s %(id)s is not '
                          'local to %(backend)s.') %
@@ -1305,7 +1307,7 @@ class VolumeManager(manager.CleanableManager,
                 raise exception.InvalidVolume(
                     reason=_("being attached by different mode"))
-        host_name_sanitized = vol_utils.sanitize_hostname(
+        host_name_sanitized = volume_utils.sanitize_hostname(
             host_name) if host_name else None
         if instance_uuid:
             attachments = (
@@ -2162,11 +2164,11 @@ class VolumeManager(manager.CleanableManager,
         try:
             size_in_mb = int(src_vol['size']) * units.Ki  # vol size is in GB
-            vol_utils.copy_volume(src_attach_info['device']['path'],
-                                  dest_attach_info['device']['path'],
-                                  size_in_mb,
-                                  self.configuration.volume_dd_blocksize,
-                                  sparse=sparse_copy_volume)
+            volume_utils.copy_volume(src_attach_info['device']['path'],
+                                     dest_attach_info['device']['path'],
+                                     size_in_mb,
+                                     self.configuration.volume_dd_blocksize,
+                                     sparse=sparse_copy_volume)
         except Exception:
             with excutils.save_and_reraise_exception():
                 LOG.error("Failed to copy volume %(src)s to %(dest)s.",
@@ -2197,7 +2199,7 @@ class VolumeManager(manager.CleanableManager,
             new_vol_values['volume_type_id'] = new_type_id
             if volume_types.volume_types_encryption_changed(
                     ctxt, volume.volume_type_id, new_type_id):
-                encryption_key_id = vol_utils.create_encryption_key(
+                encryption_key_id = volume_utils.create_encryption_key(
                     ctxt, self.key_manager, new_type_id)
                 new_vol_values['encryption_key_id'] = encryption_key_id
@@ -2665,7 +2667,7 @@ class VolumeManager(manager.CleanableManager,
                                  volume,
                                  event_suffix,
                                  extra_usage_info=None):
-        vol_utils.notify_about_volume_usage(
+        volume_utils.notify_about_volume_usage(
             context, volume, event_suffix,
             extra_usage_info=extra_usage_info, host=self.host)
@@ -2674,7 +2676,7 @@ class VolumeManager(manager.CleanableManager,
                                    snapshot,
                                    event_suffix,
                                    extra_usage_info=None):
-        vol_utils.notify_about_snapshot_usage(
+        volume_utils.notify_about_snapshot_usage(
             context, snapshot, event_suffix,
             extra_usage_info=extra_usage_info, host=self.host)
@@ -2684,7 +2686,7 @@ class VolumeManager(manager.CleanableManager,
                                 event_suffix,
                                 volumes=None,
                                 extra_usage_info=None):
-        vol_utils.notify_about_group_usage(
+        volume_utils.notify_about_group_usage(
             context, group, event_suffix,
             extra_usage_info=extra_usage_info, host=self.host)
@@ -2693,7 +2695,7 @@ class VolumeManager(manager.CleanableManager,
                 context, group.id)
             if volumes:
                 for volume in volumes:
-                    vol_utils.notify_about_volume_usage(
+                    volume_utils.notify_about_volume_usage(
                         context, volume, event_suffix,
                         extra_usage_info=extra_usage_info, host=self.host)
@@ -2703,7 +2705,7 @@ class VolumeManager(manager.CleanableManager,
                                          event_suffix,
                                          snapshots=None,
                                          extra_usage_info=None):
-        vol_utils.notify_about_group_snapshot_usage(
+        volume_utils.notify_about_group_snapshot_usage(
             context, group_snapshot, event_suffix,
             extra_usage_info=extra_usage_info, host=self.host)
@@ -2712,7 +2714,7 @@ class VolumeManager(manager.CleanableManager,
                 context, group_snapshot.id)
             if snapshots:
                 for snapshot in snapshots:
-                    vol_utils.notify_about_snapshot_usage(
+                    volume_utils.notify_about_snapshot_usage(
                         context, snapshot, event_suffix,
                         extra_usage_info=extra_usage_info, host=self.host)
@@ -2770,11 +2772,11 @@ class VolumeManager(manager.CleanableManager,
                               for attachment in attachments]
             nova_api.extend_volume(context, instance_uuids, volume.id)
-        pool = vol_utils.extract_host(volume.host, 'pool')
+        pool = volume_utils.extract_host(volume.host, 'pool')
         if pool is None:
             # Legacy volume, put them into default pool
             pool = self.driver.configuration.safe_get(
-                'volume_backend_name') or vol_utils.extract_host(
+                'volume_backend_name') or volume_utils.extract_host(
                     volume.host, 'pool', True)
         try:
@@ -2791,10 +2793,10 @@ class VolumeManager(manager.CleanableManager,
     def _is_our_backend(self, host, cluster_name):
         return ((not cluster_name and
-                 vol_utils.hosts_are_equivalent(self.driver.host, host)) or
+                 volume_utils.hosts_are_equivalent(self.driver.host, host)) or
                 (cluster_name and
-                 vol_utils.hosts_are_equivalent(self.driver.cluster_name,
-                                                cluster_name)))
+                 volume_utils.hosts_are_equivalent(self.driver.cluster_name,
+                                                   cluster_name)))
     def retype(self, context, volume, new_type_id, host,
                migration_policy='never', reservations=None,
@@ -2954,7 +2956,7 @@ class VolumeManager(manager.CleanableManager,
             replication_diff = diff_specs.get('replication_enabled')
             if replication_diff:
-                is_replicated = vol_utils.is_boolean_str(replication_diff[1])
+                is_replicated = volume_utils.is_boolean_str(replication_diff[1])
                 if is_replicated:
                     replication_status = fields.ReplicationStatus.ENABLED
                 else:
@@ -2973,11 +2975,11 @@ class VolumeManager(manager.CleanableManager,
     def _update_stats_for_managed(self, volume_reference):
         # Update volume stats
-        pool = vol_utils.extract_host(volume_reference.host, 'pool')
+        pool = volume_utils.extract_host(volume_reference.host, 'pool')
         if pool is None:
             # Legacy volume, put them into default pool
             pool = self.driver.configuration.safe_get(
-                'volume_backend_name') or vol_utils.extract_host(
+                'volume_backend_name') or volume_utils.extract_host(
                     volume_reference.host, 'pool', True)
         try:
@@ -3440,12 +3442,13 @@ class VolumeManager(manager.CleanableManager,
     def _update_allocated_capacity(self, vol, decrement=False, host=None):
         # Update allocated capacity in volume stats
         host = host or vol['host']
-        pool = vol_utils.extract_host(host, 'pool')
+        pool = volume_utils.extract_host(host, 'pool')
         if pool is None:
             # Legacy volume, put them into default pool
             pool = self.driver.configuration.safe_get(
-                'volume_backend_name') or vol_utils.extract_host(host, 'pool',
-                                                                 True)
+                'volume_backend_name') or volume_utils.extract_host(host,
                                                                     'pool',
                                                                     True)
         vol_size = -vol['size'] if decrement else vol['size']
         try: