Merge "Fix non iSCSI attach serialization"
commit 6cb210c4d9
@@ -109,7 +109,9 @@ def _get_non_shared_target_hosts(ctxt):
                                                     constants.VOLUME_TOPIC)
     for service in services:
         capabilities = rpcapi.get_capabilities(ctxt, service.host, True)
-        if not capabilities.get('shared_targets', True):
+        # Select only non iSCSI connections and iSCSI that are explicit
+        if (capabilities.get('storage_protocol') != 'iSCSI' or
+                not capabilities.get('shared_targets', True)):
             hosts.append(service.host)
             numvols_needing_update += db_api.model_query(
                 ctxt, models.Volume).filter_by(
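
For illustration only (not part of the diff): a minimal sketch of the selection rule the hunk above adds to _get_non_shared_target_hosts. The helper name and the capability dicts are made up for this example; in the real code the capabilities come from each volume service via rpcapi.get_capabilities().

def needs_shared_targets_update(capabilities):
    # A host is selected unless it is an iSCSI backend relying on the
    # implicit shared-targets default; an explicit shared_targets=False
    # still selects it, and every non iSCSI protocol is selected.
    return (capabilities.get('storage_protocol') != 'iSCSI' or
            not capabilities.get('shared_targets', True))

assert needs_shared_targets_update({'storage_protocol': 'RBD'})
assert not needs_shared_targets_update({'storage_protocol': 'iSCSI'})
assert needs_shared_targets_update({'storage_protocol': 'iSCSI',
                                    'shared_targets': False})
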
@@ -2262,80 +2262,66 @@ class TestVolumeSharedTargetsOnlineMigration(test.TestCase):
         self.patch('cinder.objects.Service.get_minimum_rpc_version',
                    side_effect=_get_minimum_rpc_version_mock)
 
-    @mock.patch('cinder.objects.Service.get_minimum_obj_version',
-                return_value='1.8')
-    def test_shared_targets_migrations(self, mock_version):
-        """Ensure we can update the column."""
         ctxt = context.get_admin_context()
-        sqlalchemy_api.volume_create(
-            ctxt,
-            {'host': 'host1@lvm-driver1#lvm-driver1',
-             'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'})
-
-        # Create another one correct setting
-        sqlalchemy_api.volume_create(
-            ctxt,
-            {'host': 'host1@lvm-driver1#lvm-driver1',
-             'shared_targets': False,
-             'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'})
-
-        # Need a service to query
+        # default value in db for shared_targets on a volume
+        # is True, so don't need to set it here explicitly
+        for i in range(3):
+            sqlalchemy_api.volume_create(
+                ctxt,
+                {'host': 'host1@lvm-driver1#lvm-driver1',
+                 'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'})
+
         values = {
             'host': 'host1@lvm-driver1',
             'binary': constants.VOLUME_BINARY,
             'topic': constants.VOLUME_TOPIC,
             'uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'}
         utils.create_service(ctxt, values)
+        self.ctxt = ctxt
 
+    @mock.patch('cinder.objects.Service.get_minimum_obj_version',
+                return_value='1.8')
+    def test_shared_targets_migrations(self, mock_version):
+        """Ensure we can update the column."""
         # Run the migration and verify that we updated 1 entry
         with mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities',
-                        return_value={'shared_targets': False}):
+                        return_value={'connection_protocol': 'iSCSI',
+                                      'shared_targets': False}):
             total, updated = (
                 cinder_manage.shared_targets_online_data_migration(
-                    ctxt, 10))
-            self.assertEqual(1, total)
-            self.assertEqual(1, updated)
+                    self.ctxt, 10))
+            self.assertEqual(3, total)
+            self.assertEqual(3, updated)
+
+    @mock.patch('cinder.objects.Service.get_minimum_obj_version',
+                return_value='1.8')
+    def test_shared_targets_migrations_non_iscsi(self, mock_version):
+        """Ensure we can update the column."""
+        # Run the migration and verify that we updated 1 entry
+        with mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities',
+                        return_value={'connection_protocol': 'RBD'}):
+            total, updated = (
+                cinder_manage.shared_targets_online_data_migration(
+                    self.ctxt, 10))
+            self.assertEqual(3, total)
+            self.assertEqual(3, updated)
 
     @mock.patch('cinder.objects.Service.get_minimum_obj_version',
                 return_value='1.8')
     def test_shared_targets_migrations_with_limit(self, mock_version):
         """Ensure we update in batches."""
-        ctxt = context.get_admin_context()
-        # default value in db for shared_targets on a volume
-        # is True, so don't need to set it here explicitly
-        sqlalchemy_api.volume_create(
-            ctxt,
-            {'host': 'host1@lvm-driver1#lvm-driver1',
-             'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'})
-
-        sqlalchemy_api.volume_create(
-            ctxt,
-            {'host': 'host1@lvm-driver1#lvm-driver1',
-             'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'})
-
-        sqlalchemy_api.volume_create(
-            ctxt,
-            {'host': 'host1@lvm-driver1#lvm-driver1',
-             'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'})
-
-        values = {
-            'host': 'host1@lvm-driver1',
-            'binary': constants.VOLUME_BINARY,
-            'topic': constants.VOLUME_TOPIC,
-            'uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'}
-        utils.create_service(ctxt, values)
-
         # Run the migration and verify that we updated 1 entry
         with mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities',
-                        return_value={'shared_targets': False}):
+                        return_value={'connection_protocol': 'iSCSI',
+                                      'shared_targets': False}):
             total, updated = (
                 cinder_manage.shared_targets_online_data_migration(
-                    ctxt, 2))
+                    self.ctxt, 2))
             self.assertEqual(3, total)
             self.assertEqual(2, updated)
 
             total, updated = (
                 cinder_manage.shared_targets_online_data_migration(
-                    ctxt, 2))
+                    self.ctxt, 2))
             self.assertEqual(1, total)
             self.assertEqual(1, updated)
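
As test_shared_targets_migrations_with_limit shows, each call to cinder_manage.shared_targets_online_data_migration(ctxt, max_count) returns (total, updated): how many volumes still need the update and how many this batch changed. The sketch below is a hypothetical helper, assuming the same migration callable and context as in the tests above, that drives such a migration to completion in batches, roughly what cinder-manage's online data migration loop does:

def run_to_completion(migration, ctxt, max_count=2):
    # Repeat batches until the migration reports nothing left to update,
    # mirroring the (3, 2) then (1, 1) sequence asserted above.
    while True:
        total, updated = migration(ctxt, max_count)
        if total == 0:
            break
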
@@ -689,14 +689,15 @@ class VolumeManager(manager.CleanableManager,
             self._update_allocated_capacity(volume, decrement=True,
                                             host=original_host)
 
-        shared_targets = (
-            1
-            if self.driver.capabilities.get('shared_targets', True)
-            else 0)
-        updates = {'service_uuid': self.service_uuid,
-                   'shared_targets': shared_targets}
-
-        volume.update(updates)
+        # Shared targets is only relevant for iSCSI connections.
+        # We default to True to be on the safe side.
+        volume.shared_targets = (
+            self.driver.capabilities.get('storage_protocol') == 'iSCSI' and
+            self.driver.capabilities.get('shared_targets', True))
+        # TODO(geguileo): service_uuid won't be enough on Active/Active
+        # deployments. There can be 2 services handling volumes from the same
+        # backend.
+        volume.service_uuid = self.service_uuid
         volume.save()
 
         LOG.info("Created volume successfully.", resource=volume)
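
For illustration only: the new rule the manager hunk applies when setting volume.shared_targets at creation time, extracted into a standalone helper. The helper name and the capability dicts are hypothetical; in the real code the values come from self.driver.capabilities.

def compute_shared_targets(capabilities):
    # Shared targets only make sense for iSCSI; for iSCSI backends we stay
    # on the safe side and default to True unless the driver reports False.
    return (capabilities.get('storage_protocol') == 'iSCSI' and
            capabilities.get('shared_targets', True))

assert not compute_shared_targets({'storage_protocol': 'RBD'})
assert compute_shared_targets({'storage_protocol': 'iSCSI'})
assert not compute_shared_targets({'storage_protocol': 'iSCSI',
                                   'shared_targets': False})
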