Merge "Fix share server migration not reusing allocations"
This commit is contained in:
commit faec042d06
@@ -1284,8 +1284,8 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
         # 1. Checks if both source and destination clients support SVM Migrate.
         if (dest_client.is_svm_migrate_supported()
                 and src_client.is_svm_migrate_supported()):
-            source_share_server_name = self._get_vserver_name(
-                source_share_server['identifier'])
+            source_share_server_name = (
+                source_share_server["backend_details"]["vserver_name"])
 
             # Check if the migration is supported.
             try:
@@ -1523,8 +1523,8 @@ class NetAppCmodeMultiSVMFileStorageLibrary(
 
         # Prepare the migration request.
         src_cluster_name = src_client.get_cluster_name()
-        source_share_server_name = self._get_vserver_name(
-            source_share_server['identifier'])
+        source_share_server_name = (
+            source_share_server["backend_details"]["vserver_name"])
 
         # 3. Send the migration request to ONTAP.
         try:
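The two driver hunks above replace a vserver name re-derived from the share
server identifier with the name recorded in backend_details. A minimal sketch
of why that matters, assuming (this is an assumption, not driver code) that
_get_vserver_name applies a prefix-style template such as 'os_%s'; all values
below are made up:

    # Hypothetical illustration, not the driver's actual code.
    def templated_vserver_name(identifier, template='os_%s'):
        # Old approach: re-derive the vserver name from the *current*
        # share server identifier.
        return template % identifier

    # A share server that has already been migrated once keeps the vserver
    # it was created with, while its identifier may no longer match it.
    share_server = {
        'identifier': 'f00dfeed-0000-4000-8000-123456789abc',
        'backend_details': {
            'vserver_name': 'os_aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
        },
    }

    old_name = templated_vserver_name(share_server['identifier'])
    new_name = share_server['backend_details']['vserver_name']
    # The old lookup would target a vserver that does not exist on the
    # backend; the new lookup reads the recorded name directly.
    assert old_name != new_name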
@@ -6224,33 +6224,13 @@ class ShareManager(manager.SchedulerDependentManager):
         dest_snss = self.db.share_network_subnet_get_all_by_share_server_id(
             context, dest_share_server['id'])
 
-        existing_allocations = (
-            self.db.network_allocations_get_for_share_server(
-                context, dest_share_server['id']))
-        migration_reused_network_allocations = len(existing_allocations) == 0
         migration_extended_network_allocations = (
             CONF.server_migration_extend_neutron_network)
 
-        # NOTE: Network allocations are extended to the destination host on
-        # previous (migration_start) step, i.e. port bindings are created on
-        # destination host with existing ports. The network allocations will be
-        # cut over on this (migration_complete) step, i.e. port bindings on
-        # destination host will be activated and bindings on source host will
-        # be deleted.
-        if migration_extended_network_allocations:
-            updated_allocations = (
-                self.driver.network_api.cutover_network_allocations(
-                    context, source_share_server))
-            segmentation_id = self.db.share_server_backend_details_get_item(
-                context, dest_share_server['id'], 'segmentation_id')
-            alloc_update = {
-                'segmentation_id': segmentation_id,
-                'share_server_id': dest_share_server['id']
-            }
-            subnet_update = {
-                'segmentation_id': segmentation_id,
-            }
-
+        migration_reused_network_allocations = (len(
+            self.db.network_allocations_get_for_share_server(
+                context, dest_share_server['id'])) == 0)
+
         server_to_get_allocations = (
             dest_share_server
             if not migration_reused_network_allocations
@@ -6263,30 +6243,52 @@ class ShareManager(manager.SchedulerDependentManager):
             context, source_share_server, dest_share_server, share_instances,
             snapshot_instances, new_network_allocations)
 
+        alloc_update = {
+            'share_server_id': dest_share_server['id']
+        }
+        subnet_update = {}
+
         if migration_extended_network_allocations:
-            for alloc in updated_allocations:
-                self.db.network_allocation_update(context, alloc['id'],
-                                                  alloc_update)
-            for subnet in dest_snss:
-                self.db.share_network_subnet_update(context, subnet['id'],
-                                                    subnet_update)
-        elif not migration_reused_network_allocations:
+            # NOTE: Network allocations are extended to the destination host on
+            # previous (migration_start) step, i.e. port bindings are created
+            # on destination host with existing ports. The network allocations
+            # will be cut over on this (migration_complete) step, i.e. port
+            # bindings on destination host will be activated and bindings on
+            # source host will be deleted.
+            updated_allocations = (
+                self.driver.network_api.cutover_network_allocations(
+                    context, source_share_server))
+            segmentation_id = self.db.share_server_backend_details_get_item(
+                context, dest_share_server['id'], 'segmentation_id')
+            alloc_update.update({
+                'segmentation_id': segmentation_id
+            })
+            subnet_update.update({
+                'segmentation_id': segmentation_id,
+            })
+        elif migration_reused_network_allocations:
+            updated_allocations = (
+                self.db.network_allocations_get_for_share_server(
+                    context, source_share_server["id"]))
+        else:
             network_allocations = []
             for net_allocation in new_network_allocations:
                 network_allocations += net_allocation['network_allocations']
 
-            all_allocations = [
-                network_allocations,
-                new_network_allocations[0]['admin_network_allocations']
+            updated_allocations = [
+                *network_allocations,
+                *new_network_allocations[0]['admin_network_allocations']
             ]
-            for allocations in all_allocations:
-                for allocation in allocations:
-                    allocation_id = allocation['id']
-                    values = {
-                        'share_server_id': dest_share_server['id']
-                    }
-                    self.db.network_allocation_update(
-                        context, allocation_id, values)
+
+        for allocation in updated_allocations:
+            allocation_id = allocation['id']
+            self.db.network_allocation_update(
+                context, allocation_id, alloc_update)
+
+        if subnet_update:
+            for subnet in dest_snss:
+                self.db.share_network_subnet_update(context, subnet['id'],
+                                                    subnet_update)
 
         # If share server doesn't have an identifier, we didn't ask the driver
         # to create a brand new server - this was a nondisruptive migration
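The manager change above funnels all three migration flavors into a single
update loop. A simplified sketch of the dispatch (a hypothetical helper, not
Manila's actual method; the DB and network API calls are reduced to plain
arguments):

    # Hypothetical condensation of the migration_complete allocation logic.
    def pick_updated_allocations(extended, reused, cutover_allocs,
                                 source_allocs, new_allocs):
        """Select the allocation rows whose share_server_id must be
        re-pointed at the destination share server."""
        if extended:
            # Port bindings were pre-created on the destination during
            # migration_start; the cutover activates them.
            return cutover_allocs
        if reused:
            # The fix: reused allocations come from the source server and
            # previously were never re-pointed at all.
            return source_allocs
        # Otherwise, brand-new allocations were created for the destination.
        flat = []
        for entry in new_allocs:
            flat += entry['network_allocations']
        return [*flat, *new_allocs[0]['admin_network_allocations']]

Whichever branch runs, every returned row then receives the same alloc_update
carrying the destination share_server_id, which is exactly what the reused
path was missing before this change.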
@@ -2307,8 +2307,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
         if svm_migrate_supported:
             mock_src_is_svm_migrate_supported.assert_called_once()
             mock_find_matching_aggregates.assert_called_once()
-            mock_get_vserver_name.assert_called_once_with(
-                fake.SHARE_SERVER['id'])
+            mock_get_vserver_name.assert_not_called()
             mock_svm_migration_check_svm_mig.assert_called_once_with(
                 fake.CLUSTER_NAME, fake.VSERVER1, fake.SHARE_SERVER,
                 fake.AGGREGATES, self.mock_dest_client)
@@ -2346,8 +2345,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
         mock_dest_is_svm_migrate_supported.assert_called_once()
         mock_src_is_svm_migrate_supported.assert_called_once()
         mock_find_matching_aggregates.assert_called_once()
-        mock_get_vserver_name.assert_called_once_with(
-            fake.SHARE_SERVER['id'])
+        mock_get_vserver_name.assert_not_called()
         mock_svm_migration_check_svm_mig.assert_called_once_with(
             fake.CLUSTER_NAME, fake.VSERVER1, fake.SHARE_SERVER,
             fake.AGGREGATES, self.mock_dest_client)
@@ -2714,9 +2712,6 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
             mock.Mock(return_value=fake.IPSPACE))
         mock_create_port = self.mock_object(
             self.library, '_create_port_and_broadcast_domain')
-        mock_get_vserver_name = self.mock_object(
-            self.library, '_get_vserver_name',
-            mock.Mock(return_value=fake.VSERVER1))
         mock_get_cluster_name = self.mock_object(
             self.mock_src_client, 'get_cluster_name',
             mock.Mock(return_value=fake.CLUSTER_NAME))
@@ -2740,11 +2735,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
             node_name, fake.NODE_DATA_PORT, segmentation_id)
         mock_create_port.assert_called_once_with(
             fake.IPSPACE, network_info)
-        mock_get_vserver_name.assert_called_once_with(
-            self.fake_src_share_server['id'])
         self.assertTrue(mock_get_cluster_name.called)
         mock_svm_migration_start.assert_called_once_with(
-            fake.CLUSTER_NAME, fake.VSERVER1, fake.AGGREGATES,
+            fake.CLUSTER_NAME, self.fake_src_vserver, fake.AGGREGATES,
             dest_ipspace=fake.IPSPACE)
         self.assertTrue(mock_get_aggregates.called)
         self.assertEqual(expected_server_info, server_info)
@@ -2809,11 +2802,10 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
             node_name, fake.NODE_DATA_PORT, segmentation_id)
         mock_create_port.assert_called_once_with(
             fake.IPSPACE, network_info)
-        mock_get_vserver_name.assert_called_once_with(
-            self.fake_src_share_server['id'])
+        mock_get_vserver_name.assert_not_called()
         self.assertTrue(mock_get_cluster_name.called)
         mock_svm_migration_start.assert_called_once_with(
-            fake.CLUSTER_NAME, fake.VSERVER1, fake.AGGREGATES,
+            fake.CLUSTER_NAME, self.fake_src_vserver, fake.AGGREGATES,
             dest_ipspace=fake.IPSPACE)
         self.assertTrue(mock_get_aggregates.called)
         mock_delete_ipspace.assert_called_once_with(fake.IPSPACE)
@@ -10094,8 +10094,17 @@ class ShareManagerTestCase(test.TestCase):
             self.context, fake_share_network['id'])
         mock_subnet_get.assert_called_once_with(
             self.context, fake_dest_share_server['id'])
-        mock_allocations_get.assert_called_once_with(
-            self.context, fake_dest_share_server['id'])
+
+        if need_network_allocation:
+            mock_allocations_get.assert_called_once_with(
+                self.context, fake_dest_share_server['id'])
+        else:
+            mock_allocations_get.assert_has_calls(
+                calls=[
+                    mock.call(self.context, fake_dest_share_server['id']),
+                    mock.call(self.context, fake_source_share_server['id']),
+                ]
+            )
 
         if not need_network_allocation:
             mock_form_server_setup_info.assert_called_once_with(
@@ -0,0 +1,10 @@
+---
+fixes:
+  - |
+    When performing a share server migration without a new share network,
+    reused allocations are now properly updated with the new share_server_id.
+  - |
+    In the NetApp driver, share server migration functions now retrieve
+    vserver_name directly from backend_details instead of re-deriving it
+    from the vserver name template. This way, vserver_name is correct even
+    for share servers that have already been migrated once.
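To make the first note concrete, a toy illustration of the reuse path with
plain dicts standing in for the database rows (no real Manila APIs are used
here):

    # Reuse is detected when the destination server received no
    # allocations of its own.
    dest_allocations = []
    migration_reused = len(dest_allocations) == 0

    source_allocations = [
        {'id': 'alloc-1', 'share_server_id': 'src-server'},
        {'id': 'alloc-2', 'share_server_id': 'src-server'},
    ]

    if migration_reused:
        # Fixed behavior: re-point the reused rows at the destination.
        for alloc in source_allocations:
            alloc['share_server_id'] = 'dest-server'

    assert all(a['share_server_id'] == 'dest-server'
               for a in source_allocations)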