From edeac132a19dc05a4108c630ebdfd02de9fd92ef Mon Sep 17 00:00:00 2001
From: whoami-rajat
Date: Tue, 28 Dec 2021 12:44:42 +0000
Subject: [PATCH] Add cinder-manage command to update service_uuid

In some deployments, after an upgrade, we remove the old service records
and create new ones. This leaves the volumes with a service_uuid pointing
to the old (deleted) service and causes an issue while purging the deleted
service records.

This patch adds a cinder-manage command to update the service_uuid field
of volumes:

``cinder-manage volume update_service``

The service_uuid of volumes that still reference an old service_uuid is
also updated when cinder creates a new service.

Change-Id: I0b13c6351733b8163bcf6e73c939c375493dcba5
---
 cinder/cmd/manage.py                           | 10 ++++++
 cinder/db/api.py                               |  5 +++
 cinder/db/sqlalchemy/api.py                    | 26 ++++++++++++++
 cinder/service.py                              |  4 +++
 cinder/tests/unit/test_db_api.py               | 36 +++++++++++++++++++
 doc/source/cli/cinder-manage.rst               |  9 +++++
 .../update-service-uuid-f25dbb05efd45d87.yaml  | 15 ++++++++
 7 files changed, 105 insertions(+)
 create mode 100644 releasenotes/notes/update-service-uuid-f25dbb05efd45d87.yaml

diff --git a/cinder/cmd/manage.py b/cinder/cmd/manage.py
index a573768f43f..f368a6467bf 100644
--- a/cinder/cmd/manage.py
+++ b/cinder/cmd/manage.py
@@ -638,6 +638,16 @@ class VolumeCommands(object):
 
             db.volume_update(ctxt, v['id'], {'host': newhost})
 
+    def update_service(self):
+        """Modify the service uuid associated with volumes.
+
+        In certain upgrade cases, we create new cinder services and delete
+        the records of the old ones; however, volumes created with the old
+        services still contain the old service uuid.
+        """
+        ctxt = context.get_admin_context()
+        db.volume_update_all_by_service(ctxt)
+
 
 class ConfigCommands(object):
     """Class for exposing the flags defined by flag_file(s)."""
diff --git a/cinder/db/api.py b/cinder/db/api.py
index b8792edf152..c909a3c21fc 100644
--- a/cinder/db/api.py
+++ b/cinder/db/api.py
@@ -460,6 +460,11 @@ def volume_get_all_by_host(context, host, filters=None):
     return IMPL.volume_get_all_by_host(context, host, filters=filters)
 
 
+def volume_update_all_by_service(context):
+    """Update all volumes associated with an old service."""
+    return IMPL.volume_update_all_by_service(context)
+
+
 def volume_get_all_by_group(context, group_id, filters=None):
     """Get all volumes belonging to a consistency group."""
     return IMPL.volume_get_all_by_group(context, group_id, filters=filters)
diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py
index 426bd4f65a1..7b2d2a8431b 100644
--- a/cinder/db/sqlalchemy/api.py
+++ b/cinder/db/sqlalchemy/api.py
@@ -2857,6 +2857,32 @@ def volume_get_all_by_group(context, group_id, filters=None):
     return query.all()
 
 
+@require_admin_context
+@main_context_manager.writer
+def volume_update_all_by_service(context):
+    """Ensure volumes have the correct service_uuid value for their host.
+
+    In some deployment tools, when performing an upgrade, all service
+    records are recreated, including the c-vol service, which gets a new
+    record in the services table even though its host name is unchanged.
+    The old service record is then deleted.
+    As a consequence, the volumes keep the right host name but their
+    service UUID needs to be updated to the ID of the new service record.
+
+    :param context: context to query under
+    """
+    # Get all cinder-volume services
+    services = service_get_all(context, binary='cinder-volume')
+    for service in services:
+        query = model_query(context, models.Volume)
+        query = query.filter(
+            _filter_host(
+                models.Volume.host, service.host),
+            models.Volume.service_uuid != service.uuid)
+        query.update(
+            {"service_uuid": service.uuid}, synchronize_session=False)
+
+
 @require_context
 @main_context_manager.reader
 def volume_get_all_by_generic_group(context, group_id, filters=None):
diff --git a/cinder/service.py b/cinder/service.py
index ee9eba9380c..accab15463a 100644
--- a/cinder/service.py
+++ b/cinder/service.py
@@ -42,6 +42,7 @@ profiler_opts = importutils.try_import('osprofiler.opts')
 from cinder.common import constants
 from cinder import context
 from cinder import coordination
+from cinder import db
 from cinder import exception
 from cinder.i18n import _
 from cinder import objects
@@ -366,6 +367,9 @@ class Service(service.Service):
             # If we have updated the service_ref with replication data from
             # the cluster it will be saved.
             service_ref.save()
+            # Update all volumes that are associated with an old service with
+            # the new service uuid
+            db.volume_update_all_by_service(context)
 
     def __getattr__(self, key: str):
         manager = self.__dict__.get('manager', None)
diff --git a/cinder/tests/unit/test_db_api.py b/cinder/tests/unit/test_db_api.py
index 47ca702ca0b..369175d9f61 100644
--- a/cinder/tests/unit/test_db_api.py
+++ b/cinder/tests/unit/test_db_api.py
@@ -911,6 +911,42 @@ class DBAPIVolumeTestCase(BaseTest):
                                           filters={'foo': 'bar'})
         self.assertEqual([], vols)
 
+    def test_volume_update_all_by_service(self):
+        volume_service_uuid = '918f24b6-c4c9-48e6-86c6-6871e91f4779'
+        alt_vol_service_uuid = '4b3356a0-31e1-4cec-af1c-07e1e0d7dcf0'
+        service_uuid_1 = 'c7b169f8-8da6-4330-b462-0467069371e2'
+        service_uuid_2 = '38d41b71-2f4e-4d3e-8206-d51ace608bca'
+        host = 'fake_host'
+        alt_host = 'alt_fake_host'
+        binary = 'cinder-volume'
+        # Create 3 volumes with host 'fake_host'
+        for i in range(3):
+            db.volume_create(self.ctxt, {
+                'service_uuid': volume_service_uuid,
+                'host': host,
+                'volume_type_id': fake.VOLUME_TYPE_ID})
+        # Create 2 volumes with host 'alt_fake_host'
+        for i in range(2):
+            db.volume_create(self.ctxt, {
+                'service_uuid': alt_vol_service_uuid,
+                'host': alt_host,
+                'volume_type_id': fake.VOLUME_TYPE_ID})
+        # Create service entry for 'fake_host'
+        utils.create_service(
+            self.ctxt,
+            {'uuid': service_uuid_1, 'host': host, 'binary': binary})
+        # Create service entry for 'alt_fake_host'
+        utils.create_service(
+            self.ctxt,
+            {'uuid': service_uuid_2, 'host': alt_host, 'binary': binary})
+        db.volume_update_all_by_service(self.ctxt)
+        volumes = db.volume_get_all(self.ctxt)
+        for volume in volumes:
+            if volume.host == host:
+                self.assertEqual(service_uuid_1, volume.service_uuid)
+            elif volume.host == alt_host:
+                self.assertEqual(service_uuid_2, volume.service_uuid)
+
     def test_volume_get_all_by_project(self):
         volumes = []
         for i in range(3):
diff --git a/doc/source/cli/cinder-manage.rst b/doc/source/cli/cinder-manage.rst
index bc092117445..f605459f2cc 100644
--- a/doc/source/cli/cinder-manage.rst
+++ b/doc/source/cli/cinder-manage.rst
@@ -166,6 +166,15 @@ Delete a volume without first checking that the volume is available.
 Updates the host name of all volumes currently associated with a specified
 host.
 
+``cinder-manage volume update_service``
+
+When upgrading cinder, new service entries are created in the database as the
+existing cinder-volume host(s) are upgraded. In some cases, rows in the
+volumes table keep references to the old service, which can prevent the old
+services from being deleted when the database is purged. This command makes
+sure that all volumes on all cinder-volume hosts reference the correct,
+current service.
+
 Cinder Host
 ~~~~~~~~~~~
 
diff --git a/releasenotes/notes/update-service-uuid-f25dbb05efd45d87.yaml b/releasenotes/notes/update-service-uuid-f25dbb05efd45d87.yaml
new file mode 100644
index 00000000000..495598e9fb5
--- /dev/null
+++ b/releasenotes/notes/update-service-uuid-f25dbb05efd45d87.yaml
@@ -0,0 +1,15 @@
+---
+features:
+  - |
+    Added a new cinder-manage command to handle the situation where database
+    purges would not complete due to the volumes table holding references to
+    deleted services. The new command makes sure that all volumes reference
+    only the correct service_uuid, which allows old service records to be
+    purged from the database.
+
+    Command: ``cinder-manage volume update_service``
+  - |
+    When Cinder creates a new cinder-volume service, it now also immediately
+    updates the service_uuid of all volumes associated with that
+    cinder-volume host. Previously, stale service references could prevent
+    the database purge operation from completing successfully.
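
A quick usage sketch for operators hitting the purge failure described above
(not part of the patch itself; the 30-day purge age below is only an example
value, and ``cinder-manage db purge`` is the existing purge command):

    $ cinder-manage volume update_service
    $ cinder-manage db purge 30

Running ``update_service`` first re-points every volume at the current
cinder-volume service record for its host, after which the purge is able to
remove the old, soft-deleted service rows.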