From c2ad4b28da2378b7433f7fe54ada87a278b5e1c3 Mon Sep 17 00:00:00 2001 From: jgilaber Date: Thu, 22 May 2025 16:34:48 +0200 Subject: [PATCH] Support zone migration audit without compute_nodes When only running volume migrations, a zone migration strategy audit without setting compute_nodes should work. Before this change, an audit with defined storage_pools, no compute_nodes parameters, and with_attached_volume set to True would trigger the migration of the instances attached to the volumes being migrated. This patch decouples instance and volume migrations unless the user explicitly asks for both. When migrating attached volumes, the zone migration strategy will check which instances should be migrated according to the audit parameters, and if the instance the volume is attached to can be migrated, it will be migrated just after the volume. On the other hand, when the attached instances should not be migrated according to user input, only the volumes will be migrated. In an audit that migrates instances but not volumes, the with_attached_volume parameter will continue doing nothing. 
Closes-Bug: 2111429 Change-Id: If641af77ba368946398f9860c537a639d1053f69 Signed-off-by: jgilaber --- ...ithout-compute-nodes-ec4b1329e2b58279.yaml | 11 ++++++ .../strategy/strategies/zone_migration.py | 19 +++++++---- .../strategies/test_zone_migration.py | 34 +++++++++++++------ 3 files changed, 47 insertions(+), 17 deletions(-) create mode 100644 releasenotes/notes/zone-migration-with-attached-volume-without-compute-nodes-ec4b1329e2b58279.yaml diff --git a/releasenotes/notes/zone-migration-with-attached-volume-without-compute-nodes-ec4b1329e2b58279.yaml b/releasenotes/notes/zone-migration-with-attached-volume-without-compute-nodes-ec4b1329e2b58279.yaml new file mode 100644 index 000000000..cd59406d3 --- /dev/null +++ b/releasenotes/notes/zone-migration-with-attached-volume-without-compute-nodes-ec4b1329e2b58279.yaml @@ -0,0 +1,11 @@ +--- +fixes: + - | + The zone migration strategy no longer fails when an audit is created + with defined storage_pools, compute_nodes is not provided, and + with_attached_volume is set to True. + The strategy now creates the required volume migrations, but no instance + migrations. Both volumes and instances will only be migrated if the audit parameters + have both compute_nodes and storage_pools. + + See: https://bugs.launchpad.net/watcher/+bug/2111429 for more details. 
diff --git a/watcher/decision_engine/strategy/strategies/zone_migration.py b/watcher/decision_engine/strategy/strategies/zone_migration.py index 3af1e5086..8e7360a7f 100644 --- a/watcher/decision_engine/strategy/strategies/zone_migration.py +++ b/watcher/decision_engine/strategy/strategies/zone_migration.py @@ -291,10 +291,12 @@ class ZoneMigration(base.ZoneMigrationBaseStrategy): action_counter = ActionCounter(total_limit, per_pool_limit, per_node_limit) + instance_targets = filtered_targets.get(INSTANCE, []) if VOLUME in filtered_targets: - self.volumes_migration(filtered_targets[VOLUME], action_counter) + self.volumes_migration(filtered_targets[VOLUME], action_counter, + instance_targets) if INSTANCE in filtered_targets: - self.instances_migration(filtered_targets[INSTANCE], + self.instances_migration(instance_targets, action_counter) LOG.debug("action total: %s, pools: %s, nodes %s ", @@ -381,7 +383,8 @@ class ZoneMigration(base.ZoneMigrationBaseStrategy): return (pool.get("dst_pool", None), pool.get("dst_type")) - def volumes_migration(self, volumes, action_counter): + def volumes_migration(self, volumes, action_counter, instance_targets=[]): + instance_target_ids = {instance.id for instance in instance_targets} for volume in volumes: if action_counter.is_total_max(): LOG.debug('total reached limit') @@ -410,10 +413,14 @@ class ZoneMigration(base.ZoneMigrationBaseStrategy): else: self._volume_retype(volume, dst_type) - # if with_attached_volume is True, migrate attaching instances + # if with_attached_volume is True, migrate instances attached + # to the volumes, only if they are possible migration targets if self.with_attached_volume: - instances = [self.nova.find_instance(dic.get('server_id')) - for dic in volume.attachments] + instances = [ + self.nova.find_instance(dic.get('server_id')) + for dic in volume.attachments + if dic.get('server_id') in instance_target_ids + ] self.instances_migration(instances, action_counter) action_counter.add_pool(pool) diff 
--git a/watcher/tests/decision_engine/strategy/strategies/test_zone_migration.py b/watcher/tests/decision_engine/strategy/strategies/test_zone_migration.py index 2de745fd2..152c3694d 100644 --- a/watcher/tests/decision_engine/strategy/strategies/test_zone_migration.py +++ b/watcher/tests/decision_engine/strategy/strategies/test_zone_migration.py @@ -590,6 +590,10 @@ class TestZoneMigration(TestBaseStrategy): id="INSTANCE_1", name="INSTANCE_1") vol_attach = [{"server_id": instance_on_src1.id}] + self.m_n_helper.get_instance_list.return_value = [ + instance_on_src1, + ] + self.m_n_helper.find_instance.return_value = instance_on_src1 volume_on_src1 = self.fake_volume(host="src1@back1#pool1", id=volume_uuid_mapping["volume_1"], name="volume_1", @@ -604,18 +608,18 @@ class TestZoneMigration(TestBaseStrategy): "src_type": "type1", "dst_type": "type1"}, ] self.m_migrate_compute_nodes.return_value = None - self.m_n_helper.get_instance_list.return_value = [ - instance_on_src1, - ] self.m_with_attached_volume.return_value = True - # check that the solution contains one 'swap' volume migration and one - # instance migration, once the bug is fixed + # check that the solution contains one volume migration and no + # instance migration - # solution = self.strategy.execute() - - # temporarily catch the error to demonstrate it - exception = self.assertRaises(TypeError, self.strategy.execute) - self.assertEqual(str(exception), "'NoneType' object is not iterable") + solution = self.strategy.execute() + self.assertEqual(len(solution.actions), 1) + migration_types = collections.Counter( + [action.get('input_parameters')['migration_type'] + for action in solution.actions] + ) + self.assertEqual(0, migration_types.get("live", 0)) + self.assertEqual(1, migration_types.get("migrate", 0)) def test_execute_retype_volume(self): volume_on_src2 = self.fake_volume(host="src2@back1#pool1", @@ -1080,6 +1084,10 @@ class TestZoneMigration(TestBaseStrategy): live_ind = [item['value'] for item in solution.global_efficacy if
item['name'] == "live_instance_migrate_ratio"][0] self.assertEqual(100, live_ind) + # check that the live migration is the third action, after all + # volume migrations, since with_attached_volume=False in this test + third_action = solution.actions[2]['input_parameters'] + self.assertEqual(third_action['migration_type'], 'live') def test_execute_mixed_instances_volumes_with_attached(self): instance_on_src1_1 = self.fake_instance( @@ -1136,7 +1144,7 @@ class TestZoneMigration(TestBaseStrategy): action_types = collections.Counter( [action['action_type'] for action in solution.actions]) - expected_vol_igrations = [ + expected_vol_migrations = [ {'action_type': 'volume_migrate', 'input_parameters': {'migration_type': 'migrate', @@ -1170,7 +1178,7 @@ class TestZoneMigration(TestBaseStrategy): for action in solution.actions if action['action_type'] == 'volume_migrate'] self.assertEqual(2, action_types.get("volume_migrate", 0)) - self.assertEqual(expected_vol_igrations, migrated_volumes) + self.assertEqual(expected_vol_migrations, migrated_volumes) migrated_vms = [action for action in solution.actions if action['action_type'] == 'migrate'] @@ -1178,6 +1186,10 @@ class TestZoneMigration(TestBaseStrategy): self.assertEqual(expected_vm_migrations, migrated_vms) self.assertEqual(2, action_types.get("migrate", 0)) + # check that the live migration is the second action, before other + # volume migrations + second_action = solution.actions[1]['input_parameters'] + self.assertEqual(second_action['migration_type'], 'live') # All the detached volumes in the pool should be migrated volume_mig_ind = [item['value'] for item in solution.global_efficacy