Stop silently overriding clients with admin clients
Previously, our compute base class overwrote some clients with their admin
versions. This is needlessly confusing. This patch just removes the
setup_clients() method altogether, along with the create_test_server()
override, and forces tests to explicitly use either self.os_primary or
self.os_admin as the clients. This change has a few simple but widespread
consequences:

* We need to start using self.get_host_for_server() in a few places,
  instead of looking up the 'OS-EXT-SRV-ATTR:host' attribute in the server
  dict, as that's only present in the admin response.
* We can drop the public visibility in copy_default_image(), as that's
  only allowed for admins, and the default shared visibility should work
  just as well for us.
* The unit test for list_compute_hosts() would need to be fixed to account
  for the use of self.os_admin.services_client instead of
  self.services_client. Rather than do that, just drop the test entirely;
  it adds no value, as list_compute_hosts() is exercised by the whitebox
  tests themselves.
* We need to start explicitly passing wait_until='ACTIVE' to every one of
  our create_test_server() calls, as the override used to do that for us.
* Our live_migrate() helper now needs to be passed a clients manager so
  that it can pass that through to the waiter when waiting for the server
  to reach a particular status after the live migration.

Depends-on: https://review.opendev.org/c/openstack/tempest/+/820062
Change-Id: I8d5be63275bd8a28b7012e14b99cadafdea53a47
This commit is contained in:
parent a9ba235856
commit 6679a3853a
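In practice, the consequences above translate into the following calling
conventions in tests. This is a minimal illustrative sketch, not code from
this change: ExampleTest and the flavor ID are hypothetical placeholders,
while the helper names come from the diff below.

    class ExampleTest(base.BaseWhiteboxComputeTest):

        def test_example(self):
            # wait_until is no longer defaulted by an override, so pass it
            # explicitly ('flavor-id' is a placeholder):
            server = self.create_test_server(flavor='flavor-id',
                                             wait_until='ACTIVE')
            # The 'OS-EXT-SRV-ATTR:host' attribute is only present in the
            # admin response, so use the helper instead:
            host = self.get_host_for_server(server['id'])
            # live_migrate() now takes the clients manager whose
            # servers_client the waiter should poll:
            self.live_migrate(self.os_primary, server['id'], 'ACTIVE',
                              target_host=host)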
@@ -38,36 +38,14 @@ LOG = logging.getLogger(__name__)
 class BaseWhiteboxComputeTest(base.BaseV2ComputeAdminTest):
 
-    @classmethod
-    def setup_clients(cls):
-        super(BaseWhiteboxComputeTest, cls).setup_clients()
-        # TODO(stephenfin): Rewrite tests to use 'admin_servers_client' etc.
-        cls.servers_client = cls.os_admin.servers_client
-        cls.flavors_client = cls.os_admin.flavors_client
-        cls.service_client = cls.os_admin.services_client
-        cls.image_client = cls.os_admin.image_client_v2
-        cls.volumes_client = cls.os_admin.volumes_client_latest
-        cls.admin_migration_client = cls.os_admin.migrations_client
-        cls.admin_volumes_client = cls.os_admin.volumes_client_latest
-        cls.admin_volume_types_client = cls.os_admin.volume_types_client_latest
-        cls.admin_encryption_types_client = \
-            cls.os_admin.encryption_types_client_latest
-
-    def create_test_server(self, wait_until='ACTIVE', *args, **kwargs):
-        # override the function to return the admin view of the created server
-        server = super(BaseWhiteboxComputeTest, self).create_test_server(
-            *args, wait_until=wait_until, **kwargs)
-
-        return self.admin_servers_client.show_server(server['id'])['server']
-
     def create_flavor(self, ram=64, vcpus=2,
                       disk=CONF.whitebox.flavor_volume_size, name=None,
                       is_public='True', extra_specs=None, **kwargs):
         flavor = super(BaseWhiteboxComputeTest, self).create_flavor(
             ram, vcpus, disk, name, is_public, **kwargs)
         if extra_specs:
-            self.flavors_client.set_flavor_extra_spec(flavor['id'],
-                                                      **extra_specs)
+            self.os_admin.flavors_client.set_flavor_extra_spec(flavor['id'],
+                                                               **extra_specs)
         return flavor
 
     def copy_default_image(self, **kwargs):
@@ -77,8 +55,8 @@ class BaseWhiteboxComputeTest(base.BaseV2ComputeAdminTest):
 
         :return image_id: The UUID of the newly created image.
         """
-        image = self.image_client.show_image(CONF.compute.image_ref)
-        image_data = self.image_client.show_image_file(
+        image = self.images_client.show_image(CONF.compute.image_ref)
+        image_data = self.images_client.show_image_file(
             CONF.compute.image_ref).data
         image_file = six.BytesIO(image_data)
 
@@ -87,12 +65,11 @@ class BaseWhiteboxComputeTest(base.BaseV2ComputeAdminTest):
             'disk_format': image['disk_format'],
             'min_disk': image['min_disk'],
             'min_ram': image['min_ram'],
-            'visibility': 'public',
         }
         create_dict.update(kwargs)
-        new_image = self.image_client.create_image(**create_dict)
-        self.addCleanup(self.image_client.delete_image, new_image['id'])
-        self.image_client.store_image_file(new_image['id'], image_file)
+        new_image = self.images_client.create_image(**create_dict)
+        self.addCleanup(self.images_client.delete_image, new_image['id'])
+        self.images_client.store_image_file(new_image['id'], image_file)
 
         return new_image['id']
 
@@ -100,9 +77,8 @@ class BaseWhiteboxComputeTest(base.BaseV2ComputeAdminTest):
         """Returns a list of all nova-compute hostnames in the deployment.
         Assumes all are up and running.
         """
-        binary_name = 'nova-compute'
-        services = \
-            self.service_client.list_services(binary=binary_name)['services']
+        services = self.os_admin.services_client.list_services(
+            binary='nova-compute')['services']
         return [service['host'] for service in services]
 
     @contextlib.contextmanager
@@ -116,10 +92,10 @@ class BaseWhiteboxComputeTest(base.BaseV2ComputeAdminTest):
             yield [stack.enter_context(mgr) for mgr in ctxt_mgrs]
 
     def get_server_xml(self, server_id):
-        server = self.servers_client.show_server(server_id)
+        server = self.os_admin.servers_client.show_server(server_id)
         host = server['server']['OS-EXT-SRV-ATTR:host']
         cntrlplane_addr = whitebox_utils.get_ctlplane_address(host)
-        server_instance_name = self.servers_client.show_server(
+        server_instance_name = self.os_admin.servers_client.show_server(
             server_id)['server']['OS-EXT-SRV-ATTR:instance_name']
 
         virshxml = clients.VirshXMLClient(cntrlplane_addr)
@@ -127,8 +103,7 @@ class BaseWhiteboxComputeTest(base.BaseV2ComputeAdminTest):
         return ET.fromstring(xml)
 
     def get_server_blockdevice_path(self, server_id, device_name):
-        server = self.servers_client.show_server(server_id)
-        host = server['server']['OS-EXT-SRV-ATTR:host']
+        host = self.get_host_for_server(server_id)
         cntrlplane_addr = whitebox_utils.get_ctlplane_address(host)
         virshxml = clients.VirshXMLClient(cntrlplane_addr)
         blklist = virshxml.domblklist(server_id).splitlines()
@@ -138,12 +113,22 @@ class BaseWhiteboxComputeTest(base.BaseV2ComputeAdminTest):
                 target, source = line.split()
                 return source
 
-    def live_migrate(self, server_id, state, target_host=None):
+    def live_migrate(self, clients, server_id, state, target_host=None):
+        """Live migrate a server.
+
+        :param clients: Clients to use when waiting for the server to
+                        reach the specified state.
+        :param server_id: The UUID of the server to live migrate.
+        :param state: Wait for the server to reach this state after live
+                      migration.
+        :param target_host: Optional target host for the live migration.
+        """
         orig_host = self.get_host_for_server(server_id)
         self.admin_servers_client.live_migrate_server(server_id,
                                                       block_migration='auto',
                                                       host=target_host)
-        waiters.wait_for_server_status(self.servers_client, server_id, state)
+        waiters.wait_for_server_status(clients.servers_client, server_id,
+                                       state)
         if target_host:
             self.assertEqual(
                 target_host, self.get_host_for_server(server_id),
@@ -166,7 +151,7 @@ class BaseWhiteboxComputeTest(base.BaseV2ComputeAdminTest):
         cache volumes stored in a separate tenant to the original volumes
         created from the type.
         """
-        volumes = self.admin_volumes_client.list_volumes(
+        volumes = self.os_admin.volumes_client_latest.list_volumes(
             detail=True, params={'all_tenants': 1})['volumes']
         type_name = volume_type['name']
         for volume in [v for v in volumes if v['volume_type'] == type_name]:
@@ -175,7 +160,8 @@ class BaseWhiteboxComputeTest(base.BaseV2ComputeAdminTest):
             test_utils.call_and_ignore_notfound_exc(
                 self.volumes_client.delete_volume, volume['id'])
             self.volumes_client.wait_for_resource_deletion(volume['id'])
-        self.admin_volume_types_client.delete_volume_type(volume_type['id'])
+        self.os_admin.volume_types_client_latest.delete_volume_type(
+            volume_type['id'])
 
     def create_volume_type(self, client=None, name=None, backend_name=None,
                            **kwargs):
@@ -197,7 +183,7 @@ class BaseWhiteboxComputeTest(base.BaseV2ComputeAdminTest):
         """
 
         if not client:
-            client = self.admin_volume_types_client
+            client = self.os_admin.volume_types_client_latest
         if not name:
             class_name = self.__class__.__name__
             name = data_utils.rand_name(class_name + '-volume-type')
@@ -222,7 +208,7 @@ class BaseWhiteboxComputeTest(base.BaseV2ComputeAdminTest):
                                control_location=None):
         """Creates an encryption type for volume"""
         if not client:
-            client = self.admin_encryption_types_client
+            client = self.os_admin.encryption_types_client_latest
         if not type_id:
             volume_type = self.create_volume_type()
             type_id = volume_type['id']
@@ -137,7 +137,7 @@ class CPUPolicyTest(BasePinningTest):
         """Ensure an instance with an explicit 'shared' policy work."""
         flavor = self.create_flavor(vcpus=self.vcpus,
                                     extra_specs=self.shared_cpu_policy)
-        self.create_test_server(flavor=flavor['id'])
+        self.create_test_server(flavor=flavor['id'], wait_until='ACTIVE')
 
     @testtools.skipUnless(CONF.whitebox.max_compute_nodes < 2,
                           'Single compute node required.')
@@ -150,8 +150,10 @@ class CPUPolicyTest(BasePinningTest):
         """
         flavor = self.create_flavor(vcpus=self.vcpus,
                                     extra_specs=self.dedicated_cpu_policy)
-        server_a = self.create_test_server(flavor=flavor['id'])
-        server_b = self.create_test_server(flavor=flavor['id'])
+        server_a = self.create_test_server(flavor=flavor['id'],
+                                           wait_until='ACTIVE')
+        server_b = self.create_test_server(flavor=flavor['id'],
+                                           wait_until='ACTIVE')
         cpu_pinnings_a = self.get_server_cpu_pinning(server_a['id'])
         cpu_pinnings_b = self.get_server_cpu_pinning(server_b['id'])
 
@@ -175,7 +177,8 @@ class CPUPolicyTest(BasePinningTest):
         """Ensure resizing an instance to unpinned actually drops pinning."""
         flavor_a = self.create_flavor(vcpus=self.vcpus,
                                       extra_specs=self.dedicated_cpu_policy)
-        server = self.create_test_server(flavor=flavor_a['id'])
+        server = self.create_test_server(flavor=flavor_a['id'],
+                                         wait_until='ACTIVE')
         cpu_pinnings = self.get_server_cpu_pinning(server['id'])
 
         self.assertEqual(
@@ -197,7 +200,8 @@ class CPUPolicyTest(BasePinningTest):
         """Ensure resizing an instance to pinned actually applies pinning."""
         flavor_a = self.create_flavor(vcpus=self.vcpus,
                                       extra_specs=self.shared_cpu_policy)
-        server = self.create_test_server(flavor=flavor_a['id'])
+        server = self.create_test_server(flavor=flavor_a['id'],
+                                         wait_until='ACTIVE')
         cpu_pinnings = self.get_server_cpu_pinning(server['id'])
 
         self.assertEqual(
@@ -217,7 +221,8 @@ class CPUPolicyTest(BasePinningTest):
         """Ensure pinning information is persisted after a reboot."""
         flavor = self.create_flavor(vcpus=self.vcpus,
                                     extra_specs=self.dedicated_cpu_policy)
-        server = self.create_test_server(flavor=flavor['id'])
+        server = self.create_test_server(flavor=flavor['id'],
+                                         wait_until='ACTIVE')
         cpu_pinnings = self.get_server_cpu_pinning(server['id'])
 
         self.assertEqual(
@@ -298,8 +303,9 @@ class CPUThreadPolicyTest(BasePinningTest):
         """Ensure vCPUs *are not* placed on thread siblings."""
         flavor = self.create_flavor(vcpus=self.vcpus,
                                     extra_specs=self.isolate_thread_policy)
-        server = self.create_test_server(flavor=flavor['id'])
-        host = server['OS-EXT-SRV-ATTR:host']
+        server = self.create_test_server(flavor=flavor['id'],
+                                         wait_until='ACTIVE')
+        host = self.get_host_for_server(server['id'])
 
         cpu_pinnings = self.get_server_cpu_pinning(server['id'])
         pcpu_siblings = self.get_host_cpu_siblings(host)
@@ -326,8 +332,9 @@ class CPUThreadPolicyTest(BasePinningTest):
         """
         flavor = self.create_flavor(vcpus=self.vcpus,
                                     extra_specs=self.prefer_thread_policy)
-        server = self.create_test_server(flavor=flavor['id'])
-        host = server['OS-EXT-SRV-ATTR:host']
+        server = self.create_test_server(flavor=flavor['id'],
+                                         wait_until='ACTIVE')
+        host = self.get_host_for_server(server['id'])
 
         cpu_pinnings = self.get_server_cpu_pinning(server['id'])
         pcpu_siblings = self.get_host_cpu_siblings(host)
@@ -353,8 +360,9 @@ class CPUThreadPolicyTest(BasePinningTest):
         """
         flavor = self.create_flavor(vcpus=self.vcpus,
                                     extra_specs=self.require_thread_policy)
-        server = self.create_test_server(flavor=flavor['id'])
-        host = server['OS-EXT-SRV-ATTR:host']
+        server = self.create_test_server(flavor=flavor['id'],
+                                         wait_until='ACTIVE')
+        host = self.get_host_for_server(server['id'])
 
         cpu_pinnings = self.get_server_cpu_pinning(server['id'])
         pcpu_siblings = self.get_host_cpu_siblings(host)
@@ -400,7 +408,8 @@ class EmulatorThreadTest(BasePinningTest, numa_helper.NUMAHelperMixin):
             'hw:cpu_policy': 'dedicated',
             'hw:emulator_threads_policy': threads_policy
         }
-        self.flavors_client.set_flavor_extra_spec(flavor['id'], **specs)
+        self.os_admin.flavors_client.set_flavor_extra_spec(flavor['id'],
+                                                           **specs)
         return flavor
 
     def test_policy_share_cpu_shared_set(self):
@@ -417,7 +426,8 @@ class EmulatorThreadTest(BasePinningTest, numa_helper.NUMAHelperMixin):
         flavor = self.create_flavor(threads_policy='share',
                                     vcpus=self.shared_cpus_per_numa)
 
-        server = self.create_test_server(flavor=flavor['id'])
+        server = self.create_test_server(flavor=flavor['id'],
+                                         wait_until='ACTIVE')
 
         # Determine the compute host the guest was scheduled to and gather
         # the cpu shared set from the host
@@ -455,11 +465,12 @@ class EmulatorThreadTest(BasePinningTest, numa_helper.NUMAHelperMixin):
             threads_policy='share',
             vcpus=int(self.dedicated_cpus_per_numa / 2))
 
-        server_a = self.create_test_server(flavor=flavor['id'])
+        server_a = self.create_test_server(flavor=flavor['id'],
+                                           wait_until='ACTIVE')
         server_b = self.create_test_server(
             flavor=flavor['id'],
-            scheduler_hints={'same_host': server_a['id']}
-        )
+            scheduler_hints={'same_host': server_a['id']},
+            wait_until='ACTIVE')
 
         # Gather the emulator threads from server A and B. Then gather the
         # pinned PCPUs from server A and B.
@@ -507,7 +518,8 @@ class EmulatorThreadTest(BasePinningTest, numa_helper.NUMAHelperMixin):
         flavor = self.create_flavor(threads_policy='isolate',
                                     vcpus=(self.dedicated_cpus_per_numa - 1))
 
-        server = self.create_test_server(flavor=flavor['id'])
+        server = self.create_test_server(flavor=flavor['id'],
+                                         wait_until='ACTIVE')
 
         # Gather the emulator threads and the pinned PCPUs from the guest
         emulator_threads = \
@@ -560,7 +572,8 @@ class EmulatorThreadTest(BasePinningTest, numa_helper.NUMAHelperMixin):
         # Confirm the instance cannot be built
         self.assertRaises(BuildErrorException,
                           self.create_test_server,
-                          flavor=flavor['id'])
+                          flavor=flavor['id'],
+                          wait_until='ACTIVE')
 
 
 class NUMALiveMigrationBase(BasePinningTest):
@@ -663,14 +676,16 @@ class NUMALiveMigrationTest(NUMALiveMigrationBase):
                  'hw:emulator_threads_policy': 'share'}
         flavor = self.create_flavor(vcpus=(int(dedicated_cpus_per_numa / 2)),
                                     extra_specs=specs)
-        server_a = self.create_test_server(flavor=flavor['id'])
+        server_a = self.create_test_server(flavor=flavor['id'],
+                                           wait_until='ACTIVE')
         # TODO(artom) As of 2.68 we can no longer force a live-migration,
         # and having the different_host hint in the RequestSpec will
         # prevent live migration. Start enabling/disabling
         # DifferentHostFilter as needed?
         server_b = self.create_test_server(
             flavor=flavor['id'],
-            scheduler_hints={'different_host': server_a['id']})
+            scheduler_hints={'different_host': server_a['id']},
+            wait_until='ACTIVE')
 
         # Iterate over both guests and confirm their pinned vCPUs and emulator
         # threads are correct
@@ -714,7 +729,8 @@ class NUMALiveMigrationTest(NUMALiveMigrationBase):
 
         # Migrate server B to the same compute host as server A
         host_a = self.get_host_for_server(server_a['id'])
-        self.live_migrate(server_b['id'], 'ACTIVE', target_host=host_a)
+        self.live_migrate(self.os_primary, server_b['id'], 'ACTIVE',
+                          target_host=host_a)
 
         # After migration, guests should have disjoint (non-null) CPU pins in
         # their XML
@@ -815,10 +831,12 @@ class NUMALiveMigrationTest(NUMALiveMigrationBase):
                                     extra_specs=specs)
 
         # Boot two servers
-        server_a = self.create_test_server(flavor=flavor['id'])
+        server_a = self.create_test_server(flavor=flavor['id'],
+                                           wait_until='ACTIVE')
         server_b = self.create_test_server(
             flavor=flavor['id'],
-            scheduler_hints={'different_host': server_a['id']})
+            scheduler_hints={'different_host': server_a['id']},
+            wait_until='ACTIVE')
 
         # Assert hugepage XML element is present on both servers and the
         # pagesize is correct
@@ -842,7 +860,8 @@ class NUMALiveMigrationTest(NUMALiveMigrationBase):
 
         # Live migrate server_b
         compute_a = self.get_host_other_than(server_b['id'])
-        self.live_migrate(server_b['id'], 'ACTIVE', target_host=compute_a)
+        self.live_migrate(self.os_primary, server_b['id'], 'ACTIVE',
+                          target_host=compute_a)
 
         # Assert hugepage XML element is still present and correct size for
         # server_b after live migration
@@ -893,25 +912,20 @@ class NUMACPUDedicatedLiveMigrationTest(NUMALiveMigrationBase):
         # a server with a cpu_dedicated policy and a server that will
         # float across the respective host's cpu_shared_set
         dedicated_server_a = self.create_test_server(
-            flavor=dedicated_flavor['id']
-        )
+            flavor=dedicated_flavor['id'], wait_until='ACTIVE')
         host_a = self.get_host_for_server(dedicated_server_a['id'])
 
         shared_server_a = self.create_test_server(
             clients=self.os_admin, flavor=shared_flavor['id'],
-            host=host_a
-        )
+            host=host_a, wait_until='ACTIVE')
 
         dedicated_server_b = self.create_test_server(
             flavor=dedicated_flavor['id'],
-            scheduler_hints={'different_host': dedicated_server_a['id']}
-        )
+            scheduler_hints={'different_host': dedicated_server_a['id']},
+            wait_until='ACTIVE')
         host_b = self.get_host_for_server(dedicated_server_b['id'])
 
         shared_server_b = self.create_test_server(
             clients=self.os_admin, flavor=shared_flavor['id'],
-            host=host_b
-        )
+            host=host_b, wait_until='ACTIVE')
 
         host_sm_a = clients.NovaServiceManager(host_a, 'nova-compute',
                                                self.os_admin.services_client)
@@ -945,7 +959,7 @@ class NUMACPUDedicatedLiveMigrationTest(NUMALiveMigrationBase):
         # Live migrate shared server A to the compute node with shared
         # server B. Both servers are using shared vCPU's so migration
         # should be successful
-        self.live_migrate(shared_server_a['id'], 'ACTIVE',
+        self.live_migrate(self.os_admin, shared_server_a['id'], 'ACTIVE',
                           target_host=host_b)
 
         # Validate shared server A now has a shared cpuset that is a equal
@@ -962,7 +976,7 @@ class NUMACPUDedicatedLiveMigrationTest(NUMALiveMigrationBase):
         # Live migrate dedicated server A to the same host holding
         # dedicated server B. End result should be all 4 servers are on
         # the same host.
-        self.live_migrate(dedicated_server_a['id'], 'ACTIVE',
+        self.live_migrate(self.os_admin, dedicated_server_a['id'], 'ACTIVE',
                           target_host=host_b)
 
         # Dedicated server A should have a CPU pin set that is a subset of
@@ -1010,9 +1024,10 @@ class NUMARebuildTest(BasePinningTest):
         """
         flavor = self.create_flavor(vcpus=self.vcpus,
                                     extra_specs=self.prefer_thread_policy)
-        server = self.create_test_server(flavor=flavor['id'])
+        server = self.create_test_server(flavor=flavor['id'],
+                                         wait_until='ACTIVE')
         db_topo_orig = self._get_db_numa_topology(server['id'])
-        host = server['OS-EXT-SRV-ATTR:host']
+        host = self.get_host_for_server(server['id'])
         self.servers_client.rebuild_server(server['id'],
                                            self.image_ref_alt)['server']
         waiters.wait_for_server_status(self.servers_client,
@@ -1032,7 +1047,8 @@ class MixedCPUPolicyTest(BasePinningTest, numa_helper.NUMAHelperMixin):
         flavor = self.create_flavor(vcpus=self.vcpus,
                                     extra_specs=self.mixed_cpu_policy)
 
-        server = self.create_test_server(flavor=flavor['id'])
+        server = self.create_test_server(flavor=flavor['id'],
+                                         wait_until='ACTIVE')
         host = self.get_host_for_server(server['id'])
         host_sm = clients.NovaServiceManager(host, 'nova-compute',
                                              self.os_admin.services_client)
@@ -55,7 +55,7 @@ class FileBackedMemory(base.BaseWhiteboxComputeTest):
                 ('libvirt', 'file_backed_memory', self.size),
                 ('DEFAULT', 'ram_allocation_ratio', '1')
         ):
-            server = self.create_test_server()
+            server = self.create_test_server(wait_until='ACTIVE')
             self._assert_shared_mode_and_file_type(server)
             self.resize_server(server['id'], self.new_flavor['id'])
             self._assert_shared_mode_and_file_type(server)
@@ -65,13 +65,13 @@ class FileBackedMemory(base.BaseWhiteboxComputeTest):
                 ('libvirt', 'file_backed_memory', self.size),
                 ('DEFAULT', 'ram_allocation_ratio', '1')
         ):
-            server = self.create_test_server()
+            server = self.create_test_server(wait_until='ACTIVE')
             self._assert_shared_mode_and_file_type(server)
-            self.live_migrate(server['id'], 'ACTIVE')
+            self.live_migrate(self.os_primary, server['id'], 'ACTIVE')
             self._assert_shared_mode_and_file_type(server)
 
     def test_live_migrate_non_file_backed_host_to_file_backed_host(self):
-        server = self.create_test_server()
+        server = self.create_test_server(wait_until='ACTIVE')
         dest = self.get_host_other_than(server['id'])
         dest_svc_mgr = clients.NovaServiceManager(
             dest, 'nova-compute', self.os_admin.services_client)
@@ -40,15 +40,18 @@ class HwVideoModelTest(base.BaseWhiteboxComputeTest):
         self.assertEqual(hw_video_type, hw_video.get('type'))
 
     def test_create_virtio_instance(self):
-        server = self.create_test_server(image_id=self.virtio_image_id)
+        server = self.create_test_server(image_id=self.virtio_image_id,
+                                         wait_until='ACTIVE')
         self._assert_hw_video_type(server, 'virtio')
 
     def test_create_none_instance(self):
-        server = self.create_test_server(image_id=self.none_image_id)
+        server = self.create_test_server(image_id=self.none_image_id,
+                                         wait_until='ACTIVE')
         self._assert_hw_video_type(server, 'none')
 
     def test_rebuild_virtio_to_none(self):
-        server = self.create_test_server(image_id=self.virtio_image_id)
+        server = self.create_test_server(image_id=self.virtio_image_id,
+                                         wait_until='ACTIVE')
         self._assert_hw_video_type(server, 'virtio')
         self.servers_client.rebuild_server(server['id'], self.none_image_id)
         waiters.wait_for_server_status(self.servers_client, server['id'],
@@ -56,7 +59,8 @@ class HwVideoModelTest(base.BaseWhiteboxComputeTest):
         self._assert_hw_video_type(server, 'none')
 
     def test_rebuild_none_to_virtio(self):
-        server = self.create_test_server(image_id=self.virtio_image_id)
+        server = self.create_test_server(image_id=self.virtio_image_id,
+                                         wait_until='ACTIVE')
         self._assert_hw_video_type(server, 'virtio')
         self.servers_client.rebuild_server(server['id'], self.none_image_id)
         waiters.wait_for_server_status(self.servers_client, server['id'],
@@ -67,5 +71,5 @@ class HwVideoModelTest(base.BaseWhiteboxComputeTest):
                           'Requires expected default video model')
     def test_default_hw_device(self):
         expected_video_model = CONF.whitebox.default_video_model
-        server = self.create_test_server()
+        server = self.create_test_server(wait_until='ACTIVE')
         self._assert_hw_video_type(server, expected_video_model)
@@ -66,7 +66,7 @@ class LiveMigrationBase(base.BaseWhiteboxComputeTest,
         # The initial value of disk cache depends on config and the storage in
         # use. We can't guess it, so fetch it before we start.
         cache_type = root_disk_cache()
-        self.live_migrate(server_id, 'ACTIVE')
+        self.live_migrate(self.os_primary, server_id, 'ACTIVE')
 
         # Assert cache-mode has not changed during live migration
         self.assertEqual(cache_type, root_disk_cache())
@@ -78,9 +78,10 @@ class LiveMigrationBase(base.BaseWhiteboxComputeTest,
         """
         flavor = self.create_flavor(
            extra_specs={'hw:cpu_policy': 'dedicated'})
-        server = self.create_test_server(flavor=flavor['id'])
+        server = self.create_test_server(flavor=flavor['id'],
+                                         wait_until='ACTIVE')
         pinned_cpus_pre_migration = self.get_pinning_as_set(server['id'])
-        self.live_migrate(server['id'], 'ACTIVE')
+        self.live_migrate(self.os_primary, server['id'], 'ACTIVE')
         pinned_cpus_post_migration = self.get_pinning_as_set(server['id'])
         self.assertTrue(
             pinned_cpus_post_migration.isdisjoint(pinned_cpus_pre_migration),
@@ -29,7 +29,8 @@ class MultiqueueTest(base.BaseWhiteboxComputeTest):
         flavor = self.create_flavor()
         server = self.create_test_server(
             flavor=flavor['id'], image_id=image_id,
-            networks=[{'uuid': self.get_tenant_network()['id']}])
+            networks=[{'uuid': self.get_tenant_network()['id']}],
+            wait_until='ACTIVE')
 
         domain = self.get_server_xml(server['id'])
         driver = domain.find('./devices/interface/driver')
@@ -42,7 +42,8 @@ class NVDIMMTests(base.BaseWhiteboxComputeTest):
         # [whitebox]/pem_flavor_size
         pmem_spec = {'hw:pmem': CONF.whitebox.pmem_flavor_size}
         flavor = self.create_flavor(extra_specs=pmem_spec)
-        server = self.create_test_server(flavor=flavor['id'])
+        server = self.create_test_server(flavor=flavor['id'],
+                                         wait_until='ACTIVE')
 
         # Confirm the memory xml model is nvdimm
         root = self.get_server_xml(server['id'])
@@ -39,7 +39,7 @@ class TestRBDDirectDownload(base.BaseWhiteboxComputeTest):
             raise cls.skipException(skip_msg)
 
     def test_rbd_logs_and_conf(self):
-        base_server = self.create_test_server()
+        base_server = self.create_test_server(wait_until='ACTIVE')
         image = self.create_image_from_server(
             base_server['id'],
             name='base-server-img',
@@ -50,7 +50,13 @@ class TestRBDDirectDownload(base.BaseWhiteboxComputeTest):
         # pool to the local compute
         server = self.create_test_server(wait_until='ACTIVE',
                                          image_id=image['id'])
-        host = server['OS-EXT-SRV-ATTR:host']
 
+        # Grab image id from newly created server
+        detailed_server_data = \
+            self.os_admin.servers_client.show_server(server['id'])['server']
+        image_id = detailed_server_data['image']['id']
+
+        host = self.get_host_for_server(server['id'])
         host_sm = clients.NovaServiceManager(host, 'nova-compute',
                                              self.os_admin.services_client)
         rbd_pool = host_sm.get_conf_opt('glance', 'rbd_pool')
@@ -59,7 +65,7 @@ class TestRBDDirectDownload(base.BaseWhiteboxComputeTest):
         self.assertTrue(host_sm.get_conf_opt('glance', 'enable_rbd_download'))
         log_query_string = f"Attempting to export RBD image: " \
                            f"[[]pool_name: {rbd_pool}[]] [[]image_uuid: " \
-                           f"{server['image']['id']}[]]"
+                           f"{image_id}[]]"
         host_ip = get_ctlplane_address(host)
         logs_client = clients.LogParserClient(host_ip)
         # Assert if log with specified image is found
@@ -31,7 +31,7 @@ class RxTxQueueSizeTest(base.BaseWhiteboxComputeTest):
     @testtools.skipUnless(CONF.whitebox.rx_queue_size,
                           '`rx_queue_size` must be set')
     def test_rx_queue_size(self):
-        server = self.create_test_server()
+        server = self.create_test_server(wait_until='ACTIVE')
         domain = self.get_server_xml(server['id'])
         interface_criteria = \
             "devices/interface[@type='%s']/driver[@name='vhost']"
@@ -306,13 +306,15 @@ class SRIOVNumaAffinity(SRIOVBase, numa_helper.NUMAHelperMixin):
             flavor=flavor['id'],
             networks=[{'port': self.port_a['port']['id']}],
             clients=self.os_admin,
-            host=host
+            host=host,
+            wait_until='ACTIVE',
         )
         server_b = self.create_test_server(
             flavor=flavor['id'],
             networks=[{'port': self.port_b['port']['id']}],
             clients=self.os_admin,
-            host=host
+            host=host,
+            wait_until='ACTIVE',
         )
         cpu_pins_a = self.get_pinning_as_set(server_a['id'])
         cpu_pins_b = self.get_pinning_as_set(server_b['id'])
@@ -392,7 +394,8 @@ class SRIOVNumaAffinity(SRIOVBase, numa_helper.NUMAHelperMixin):
             flavor=flavor['id'],
             networks=[{'port': self.port_a['port']['id']}],
             clients=self.os_admin,
-            host=host
+            host=host,
+            wait_until='ACTIVE',
         )
 
         # With server A 'filling' pCPUs from the NUMA Node with SR-IOV
@@ -403,7 +406,8 @@ class SRIOVNumaAffinity(SRIOVBase, numa_helper.NUMAHelperMixin):
             flavor=flavor['id'],
             networks=[{'port': self.port_b['port']['id']}],
             clients=self.os_admin,
-            host=host)
+            host=host,
+            wait_until='ACTIVE')
 
         # Validate server A has correct sr-iov interface information
         # in the xml. Its type and vlan should be accurate.
@@ -487,7 +491,8 @@ class SRIOVMigration(SRIOVBase):
         )
 
         # Live migrate the server
-        self.live_migrate(server['id'], 'ACTIVE', target_host=hostname2)
+        self.live_migrate(self.os_admin, server['id'], 'ACTIVE',
+                          target_host=hostname2)
 
         # Search the instance's XML for the SR-IOV network device element based
         # on the mac address and binding:vnic_type from port info
@@ -514,7 +519,8 @@ class SRIOVMigration(SRIOVBase):
                          'is %s' % pci_allocated_count)
 
         # Migrate server back to the original host
-        self.live_migrate(server['id'], 'ACTIVE', target_host=hostname1)
+        self.live_migrate(self.os_admin, server['id'], 'ACTIVE',
+                          target_host=hostname1)
 
         # Again find the instance's network device element based on the mac
         # address and binding:vnic_type from the port info provided by ports
@@ -49,17 +49,17 @@ class SelinuxLabelsTest(base.BaseWhiteboxComputeTest):
             root.find("./seclabel[@model='selinux']/imagelabel").text)
 
     def test_create_server_with_label_check(self):
-        server = self.create_test_server()
+        server = self.create_test_server(wait_until='ACTIVE')
         self._assert_svirt_labels(server)
 
     def test_resize_with_label_check(self):
-        server = self.create_test_server()
+        server = self.create_test_server(wait_until='ACTIVE')
         self._assert_svirt_labels(server)
         self.resize_server(server['id'], self.new_flavor['id'])
         self._assert_svirt_labels(server)
 
     def test_live_migrate_with_label_check(self):
-        server = self.create_test_server()
+        server = self.create_test_server(wait_until='ACTIVE')
         self._assert_svirt_labels(server)
-        self.live_migrate(server['id'], 'ACTIVE')
+        self.live_migrate(self.os_primary, server['id'], 'ACTIVE')
         self._assert_svirt_labels(server)
@@ -263,7 +263,8 @@ class VGPUTest(base.BaseWhiteboxComputeTest):
         server = self.create_test_server(
             flavor=flavor['id'],
             validatable=True,
-            validation_resources=validation_resources)
+            validation_resources=validation_resources,
+            wait_until='ACTIVE')
 
         # NOTE(jparker) Order of operations for clean attempts to remove
         # validation resources before server is removed. Because of this
@@ -439,7 +440,8 @@ class VGPUResizeInstance(VGPUTest):
     def test_vgpu_to_standard_resize(self):
         # Create a vGPU instance and get the vGPU resource utilization from
         # its compute host
-        server = self.create_test_server(flavor=self.vgpu_flavor['id'])
+        server = self.create_test_server(flavor=self.vgpu_flavor['id'],
+                                         wait_until='ACTIVE')
         host = self.get_host_for_server(server['id'])
         pre_resize_usage = self._get_vgpu_util_for_host(host)
         standard_flavor = self.create_flavor()
@@ -36,7 +36,7 @@ class TestVirtIORng(base.BaseWhiteboxComputeTest):
             raise cls.skipException(skip_msg)
 
     def test_virtio_rng_model(self):
-        server = self.create_test_server()
+        server = self.create_test_server(wait_until='ACTIVE')
         domain = self.get_server_xml(server['id'])
         rng_device = domain.find("devices/rng")
         self.assertEqual(
@@ -47,6 +47,7 @@ class TestVirtIORng(base.BaseWhiteboxComputeTest):
         extra_specs = {'hw_rng:allowed': 'False'}
         flavor_id = self.create_flavor(
             extra_specs=extra_specs)['id']
-        server = self.create_test_server(flavor=flavor_id)
+        server = self.create_test_server(flavor=flavor_id,
+                                         wait_until='ACTIVE')
         domain = self.get_server_xml(server['id'])
         self.assertIsNone(domain.find("devices/rng"))
@@ -133,7 +133,8 @@ class VirtioSCSIDisk(base.BaseWhiteboxComputeTest):
         # image_meta from the BDMs.
         server = self.create_test_server(flavor=self.flavor['id'],
                                          block_device_mapping_v2=bdms,
-                                         image_id='')
+                                         image_id='',
+                                         wait_until='ACTIVE')
 
         disk_ctrl = self.get_scsi_disk_controllers(server_id=server['id'])
         self.assertEqual(len(disk_ctrl), 1,
@@ -168,7 +169,8 @@ class VirtioSCSIDisk(base.BaseWhiteboxComputeTest):
         Validate that all volumes attach correctly to the instance.
         """
         server = self.create_test_server(flavor=self.flavor['id'],
-                                         image_id=self.img_id)
+                                         image_id=self.img_id,
+                                         wait_until='ACTIVE')
         vol_ids = []
         # A virtio-scsi disk has already been attached to the server's disk
         # controller since hw_scsi_model of the image was already set to
@@ -107,7 +107,7 @@ class TestQEMUVolumeEncryption(base.BaseWhiteboxComputeTest):
 
         # Get volume details from qemu-img info with the previously generated
         # volume path
-        host = get_ctlplane_address(server['OS-EXT-SRV-ATTR:host'])
+        host = get_ctlplane_address(self.get_host_for_server(server['id']))
         qemu_img_client = QEMUImgClient(host)
         qemu_info = qemu_img_client.info(path)
 
@@ -72,8 +72,7 @@ class VolumesAdminNegativeTest(base.BaseWhiteboxComputeTest,
                          len(disks_after_attach),
                          len(disks_before_attach))
         host = whitebox_utils.get_ctlplane_address(
-            server['OS-EXT-SRV-ATTR:host']
-        )
+            self.get_host_for_server(server['id']))
 
         with clients.ServiceManager(host, 'libvirt').stopped():
             # While this call to n-api will return successfully the underlying
@@ -46,7 +46,8 @@ class VPMUTest(base.BaseWhiteboxComputeTest):
         self.assertEqual('off', pmu.get('state'))
 
     def test_rebuild_on_to_off(self):
-        server = self.create_test_server(image_id=self.on_image_id)
+        server = self.create_test_server(image_id=self.on_image_id,
+                                         wait_until='ACTIVE')
         self._assert_pmu_on(server)
         self.servers_client.rebuild_server(server['id'], self.off_image_id)
         waiters.wait_for_server_status(self.servers_client, server['id'],
@@ -54,7 +55,8 @@ class VPMUTest(base.BaseWhiteboxComputeTest):
         self._assert_pmu_off(server)
 
     def test_rebuild_off_to_on(self):
-        server = self.create_test_server(image_id=self.off_image_id)
+        server = self.create_test_server(image_id=self.off_image_id,
+                                         wait_until='ACTIVE')
         self._assert_pmu_off(server)
         self.servers_client.rebuild_server(server['id'], self.on_image_id)
         waiters.wait_for_server_status(self.servers_client, server['id'],
@@ -62,13 +64,15 @@ class VPMUTest(base.BaseWhiteboxComputeTest):
         self._assert_pmu_on(server)
 
     def test_resize_on_to_off(self):
-        server = self.create_test_server(flavor=self.on_flavor['id'])
+        server = self.create_test_server(flavor=self.on_flavor['id'],
+                                         wait_until='ACTIVE')
         self._assert_pmu_on(server)
         self.resize_server(server['id'], self.off_flavor['id'])
         self._assert_pmu_off(server)
 
     def test_resize_off_to_on(self):
-        server = self.create_test_server(flavor=self.off_flavor['id'])
+        server = self.create_test_server(flavor=self.off_flavor['id'],
+                                         wait_until='ACTIVE')
         self._assert_pmu_off(server)
         self.resize_server(server['id'], self.on_flavor['id'])
         self._assert_pmu_on(server)
@@ -1,52 +0,0 @@
-# Copyright 2018 Red Hat
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import mock
-
-from whitebox_tempest_plugin.api.compute import base as compute_base
-from whitebox_tempest_plugin.tests import base
-
-
-def fake_show_server(server_id):
-    if server_id == 'fake-id':
-        return {'server': {'OS-EXT-SRV-ATTR:host': 'fake-host'}}
-    else:
-        return {'server': {'OS-EXT-SRV-ATTR:host': 'missing-host'}}
-
-
-def fake_list_services(binary):
-    return {'services': [{'binary': 'nova-compute', 'host': 'fake-host'},
-                         {'binary': 'nova-compute', 'host': 'fake-host2'}]}
-
-
-class ComputeBaseTestCase(base.WhiteboxPluginTestCase):
-
-    def setUp(self):
-        super(ComputeBaseTestCase, self).setUp()
-        # NOTE(artom) We need to mock __init__ for the class to instantiate
-        # correctly.
-        compute_base.BaseWhiteboxComputeTest.__init__ = mock.Mock(
-            return_value=None)
-        self.test_class = compute_base.BaseWhiteboxComputeTest()
-        self.test_class.servers_client = mock.Mock()
-        self.test_class.service_client = mock.Mock()
-        self.test_class.servers_client.show_server = fake_show_server
-        self.test_class.service_client.list_services = fake_list_services
-        self.flags(ctlplane_addresses={'fake-host': 'fake-ip',
-                                       'fake-host2': 'fake-ip2'},
-                   group='whitebox')
-
-    def test_list_compute_hosts(self):
-        self.assertItemsEqual(['fake-host', 'fake-host2'],
-                              self.test_class.list_compute_hosts())