From 5d574edbbe20f2102d2e7aa08580ef53aef0cd28 Mon Sep 17 00:00:00 2001 From: Sylvan Le Deunff Date: Mon, 6 May 2024 12:30:24 +0200 Subject: [PATCH] Fix share server migration not reusing allocations Update share_server_id on existing allocations if reusing share network during share server migration. And fix the way vserver_name is retrieved in NetApp driver to permit migrating a share server more than once. Closes-Bug: #2064907 Change-Id: I2d6b4ca4e29dea4114a1a4e55e4d913f549cb94b --- .../dataontap/cluster_mode/lib_multi_svm.py | 8 +- manila/share/manager.py | 86 ++++++++++--------- .../cluster_mode/test_lib_multi_svm.py | 18 ++-- manila/tests/share/test_manager.py | 13 ++- ...ug-2064907-fix-share-server-migration.yaml | 10 +++ 5 files changed, 74 insertions(+), 61 deletions(-) create mode 100644 releasenotes/notes/bug-2064907-fix-share-server-migration.yaml diff --git a/manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py b/manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py index 98ec94c34a..ee9f438944 100644 --- a/manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py +++ b/manila/share/drivers/netapp/dataontap/cluster_mode/lib_multi_svm.py @@ -1284,8 +1284,8 @@ class NetAppCmodeMultiSVMFileStorageLibrary( # 1. Checks if both source and destination clients support SVM Migrate. if (dest_client.is_svm_migrate_supported() and src_client.is_svm_migrate_supported()): - source_share_server_name = self._get_vserver_name( - source_share_server['identifier']) + source_share_server_name = ( + source_share_server["backend_details"]["vserver_name"]) # Check if the migration is supported. try: @@ -1523,8 +1523,8 @@ class NetAppCmodeMultiSVMFileStorageLibrary( # Prepare the migration request. src_cluster_name = src_client.get_cluster_name() - source_share_server_name = self._get_vserver_name( - source_share_server['identifier']) + source_share_server_name = ( + source_share_server["backend_details"]["vserver_name"]) # 3. 
Send the migration request to ONTAP. try: diff --git a/manila/share/manager.py b/manila/share/manager.py index 228793b437..392f6fccd7 100644 --- a/manila/share/manager.py +++ b/manila/share/manager.py @@ -6224,33 +6224,13 @@ class ShareManager(manager.SchedulerDependentManager): dest_snss = self.db.share_network_subnet_get_all_by_share_server_id( context, dest_share_server['id']) + existing_allocations = ( + self.db.network_allocations_get_for_share_server( + context, dest_share_server['id'])) + migration_reused_network_allocations = len(existing_allocations) == 0 migration_extended_network_allocations = ( CONF.server_migration_extend_neutron_network) - # NOTE: Network allocations are extended to the destination host on - # previous (migration_start) step, i.e. port bindings are created on - # destination host with existing ports. The network allocations will be - # cut over on this (migration_complete) step, i.e. port bindings on - # destination host will be activated and bindings on source host will - # be deleted. 
- if migration_extended_network_allocations: - updated_allocations = ( - self.driver.network_api.cutover_network_allocations( - context, source_share_server)) - segmentation_id = self.db.share_server_backend_details_get_item( - context, dest_share_server['id'], 'segmentation_id') - alloc_update = { - 'segmentation_id': segmentation_id, - 'share_server_id': dest_share_server['id'] - } - subnet_update = { - 'segmentation_id': segmentation_id, - } - - migration_reused_network_allocations = (len( - self.db.network_allocations_get_for_share_server( - context, dest_share_server['id'])) == 0) - server_to_get_allocations = ( dest_share_server if not migration_reused_network_allocations @@ -6263,30 +6243,52 @@ class ShareManager(manager.SchedulerDependentManager): context, source_share_server, dest_share_server, share_instances, snapshot_instances, new_network_allocations) + alloc_update = { + 'share_server_id': dest_share_server['id'] + } + subnet_update = {} + if migration_extended_network_allocations: - for alloc in updated_allocations: - self.db.network_allocation_update(context, alloc['id'], - alloc_update) - for subnet in dest_snss: - self.db.share_network_subnet_update(context, subnet['id'], - subnet_update) - elif not migration_reused_network_allocations: + # NOTE: Network allocations are extended to the destination host on + # previous (migration_start) step, i.e. port bindings are created + # on destination host with existing ports. The network allocations + # will be cut over on this (migration_complete) step, i.e. port + # bindings on destination host will be activated and bindings on + # source host will be deleted. 
+ updated_allocations = ( + self.driver.network_api.cutover_network_allocations( + context, source_share_server)) + segmentation_id = self.db.share_server_backend_details_get_item( + context, dest_share_server['id'], 'segmentation_id') + alloc_update.update({ + 'segmentation_id': segmentation_id + }) + subnet_update.update({ + 'segmentation_id': segmentation_id, + }) + elif migration_reused_network_allocations: + updated_allocations = ( + self.db.network_allocations_get_for_share_server( + context, source_share_server["id"])) + else: network_allocations = [] for net_allocation in new_network_allocations: network_allocations += net_allocation['network_allocations'] - all_allocations = [ - network_allocations, - new_network_allocations[0]['admin_network_allocations'] + updated_allocations = [ + *network_allocations, + *new_network_allocations[0]['admin_network_allocations'] ] - for allocations in all_allocations: - for allocation in allocations: - allocation_id = allocation['id'] - values = { - 'share_server_id': dest_share_server['id'] - } - self.db.network_allocation_update( - context, allocation_id, values) + + for allocation in updated_allocations: + allocation_id = allocation['id'] + self.db.network_allocation_update( + context, allocation_id, alloc_update) + + if subnet_update: + for subnet in dest_snss: + self.db.share_network_subnet_update(context, subnet['id'], + subnet_update) # If share server doesn't have an identifier, we didn't ask the driver # to create a brand new server - this was a nondisruptive migration diff --git a/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py b/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py index 0d7c46282c..a8dcb7b995 100644 --- a/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py +++ b/manila/tests/share/drivers/netapp/dataontap/cluster_mode/test_lib_multi_svm.py @@ -2307,8 +2307,7 @@ class 
NetAppFileStorageLibraryTestCase(test.TestCase): if svm_migrate_supported: mock_src_is_svm_migrate_supported.assert_called_once() mock_find_matching_aggregates.assert_called_once() - mock_get_vserver_name.assert_called_once_with( - fake.SHARE_SERVER['id']) + mock_get_vserver_name.assert_not_called() mock_svm_migration_check_svm_mig.assert_called_once_with( fake.CLUSTER_NAME, fake.VSERVER1, fake.SHARE_SERVER, fake.AGGREGATES, self.mock_dest_client) @@ -2346,8 +2345,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase): mock_dest_is_svm_migrate_supported.assert_called_once() mock_src_is_svm_migrate_supported.assert_called_once() mock_find_matching_aggregates.assert_called_once() - mock_get_vserver_name.assert_called_once_with( - fake.SHARE_SERVER['id']) + mock_get_vserver_name.assert_not_called() mock_svm_migration_check_svm_mig.assert_called_once_with( fake.CLUSTER_NAME, fake.VSERVER1, fake.SHARE_SERVER, fake.AGGREGATES, self.mock_dest_client) @@ -2714,9 +2712,6 @@ class NetAppFileStorageLibraryTestCase(test.TestCase): mock.Mock(return_value=fake.IPSPACE)) mock_create_port = self.mock_object( self.library, '_create_port_and_broadcast_domain') - mock_get_vserver_name = self.mock_object( - self.library, '_get_vserver_name', - mock.Mock(return_value=fake.VSERVER1)) mock_get_cluster_name = self.mock_object( self.mock_src_client, 'get_cluster_name', mock.Mock(return_value=fake.CLUSTER_NAME)) @@ -2740,11 +2735,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase): node_name, fake.NODE_DATA_PORT, segmentation_id) mock_create_port.assert_called_once_with( fake.IPSPACE, network_info) - mock_get_vserver_name.assert_called_once_with( - self.fake_src_share_server['id']) self.assertTrue(mock_get_cluster_name.called) mock_svm_migration_start.assert_called_once_with( - fake.CLUSTER_NAME, fake.VSERVER1, fake.AGGREGATES, + fake.CLUSTER_NAME, self.fake_src_vserver, fake.AGGREGATES, dest_ipspace=fake.IPSPACE) self.assertTrue(mock_get_aggregates.called) 
self.assertEqual(expected_server_info, server_info) @@ -2809,11 +2802,10 @@ class NetAppFileStorageLibraryTestCase(test.TestCase): node_name, fake.NODE_DATA_PORT, segmentation_id) mock_create_port.assert_called_once_with( fake.IPSPACE, network_info) - mock_get_vserver_name.assert_called_once_with( - self.fake_src_share_server['id']) + mock_get_vserver_name.assert_not_called() self.assertTrue(mock_get_cluster_name.called) mock_svm_migration_start.assert_called_once_with( - fake.CLUSTER_NAME, fake.VSERVER1, fake.AGGREGATES, + fake.CLUSTER_NAME, self.fake_src_vserver, fake.AGGREGATES, dest_ipspace=fake.IPSPACE) self.assertTrue(mock_get_aggregates.called) mock_delete_ipspace.assert_called_once_with(fake.IPSPACE) diff --git a/manila/tests/share/test_manager.py b/manila/tests/share/test_manager.py index 5684f913c6..52865f46c8 100644 --- a/manila/tests/share/test_manager.py +++ b/manila/tests/share/test_manager.py @@ -10094,8 +10094,17 @@ class ShareManagerTestCase(test.TestCase): self.context, fake_share_network['id']) mock_subnet_get.assert_called_once_with( self.context, fake_dest_share_server['id']) - mock_allocations_get.assert_called_once_with( - self.context, fake_dest_share_server['id']) + + if need_network_allocation: + mock_allocations_get.assert_called_once_with( + self.context, fake_dest_share_server['id']) + else: + mock_allocations_get.assert_has_calls( + calls=[ + mock.call(self.context, fake_dest_share_server['id']), + mock.call(self.context, fake_source_share_server['id']), + ] + ) if not need_network_allocation: mock_form_server_setup_info.assert_called_once_with( diff --git a/releasenotes/notes/bug-2064907-fix-share-server-migration.yaml b/releasenotes/notes/bug-2064907-fix-share-server-migration.yaml new file mode 100644 index 0000000000..ad254be203 --- /dev/null +++ b/releasenotes/notes/bug-2064907-fix-share-server-migration.yaml @@ -0,0 +1,10 @@ +--- +fixes: + - | + When performing a share server migration without new share network, + reused 
allocations are properly updated with new share_server_id. + - | + In NetApp driver functions related to share server migration, + vserver_name is now retrieved directly from backend_details instead + of being derived from a name template. This keeps vserver_name + correct even for share servers that have already been migrated.