3fe1d72fa6
For multi-RHEL environments the container and service names differ
based on the RHEL version. Introduce the ability to define the
expected container/service names for each compute node in a
deployment. The current compute yaml file follows the format below:

    computerhel8-0.redhat.local:
      services:
        libvirt:
          container_name: nova_libvirt
          start_command: 'systemctl start tripleo_nova_libvirt'
          stop_command: 'systemctl stop tripleo_nova_libvirt'
        nova-compute:
          container_name: nova_compute
          config_path: '/var/lib/config-data/puppet-generated/nova_libvirt/etc/nova/nova.conf'
          start_command: 'systemctl start tripleo_nova_compute'
          stop_command: 'systemctl stop tripleo_nova_compute'
    compute-0.redhat.local:
      services:
        libvirt:
          container_name: nova_virtqemud
          start_command: 'systemctl start tripleo_nova_virtqemud'
          stop_command: 'systemctl stop tripleo_nova_virtqemud'
        nova-compute:
          container_name: nova_compute
          config_path: '/var/lib/config-data/puppet-generated/nova_libvirt/etc/nova/nova.conf'
          start_command: 'systemctl start tripleo_nova_compute'
          stop_command: 'systemctl stop tripleo_nova_compute'

Also remove the unit test execution, since the unit tests do not
support this latest change and do not add value to commit validation
at this time.

Change-Id: I98ac827feb4be77af9a482d8ce43d0f1d062e54d
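As an illustration of how a test might consume this per-host map, here
is a minimal Python sketch; the get_service_attrs helper and the yaml
path are assumptions for illustration, not the plugin's actual API:

    import yaml

    def get_service_attrs(compute_yaml_path, hostname, service):
        # Each top-level key in the compute yaml is a compute hostname;
        # 'services' maps a logical service name (e.g. 'libvirt') to its
        # host-specific container name and start/stop commands.
        with open(compute_yaml_path) as f:
            nodes = yaml.safe_load(f)
        return nodes[hostname]['services'][service]

    # On computerhel8-0.redhat.local this returns container_name
    # 'nova_libvirt'; the same lookup on compute-0.redhat.local
    # returns 'nova_virtqemud'.
    attrs = get_service_attrs('compute_nodes.yaml',
                              'computerhel8-0.redhat.local', 'libvirt')
    print(attrs['container_name'], attrs['start_command'])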
93 lines | 3.9 KiB | Python
# Copyright 2019 Red Hat
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools

from tempest.api.compute.volumes import test_attach_volume
from tempest.common.utils.linux import remote_client
from tempest.common import waiters
from tempest import config

from whitebox_tempest_plugin.api.compute import base
from whitebox_tempest_plugin.services import clients

CONF = config.CONF

class VolumesAdminNegativeTest(base.BaseWhiteboxComputeTest,
                               test_attach_volume.BaseAttachVolumeTest):

    @classmethod
    def skip_checks(cls):
        super(VolumesAdminNegativeTest, cls).skip_checks()
        if not CONF.service_available.cinder:
            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
            raise cls.skipException(skip_msg)

    @classmethod
    def setup_credentials(cls):
        cls.prepare_instance_network()
        super(VolumesAdminNegativeTest, cls).setup_credentials()
    @testtools.skipUnless(
        CONF.validation.run_validation,
        'ssh to instance will not work without run validation enabled.')
    def test_detach_failure(self):
        """Assert that volumes remain in-use and attached after detach failure
        """
        server, validation_resources = self._create_server()
        # NOTE: Create one remote client used throughout the test.
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server, validation_resources),
            self.image_ssh_user,
            self.image_ssh_password,
            validation_resources['keypair']['private_key'],
            server=server,
            servers_client=self.servers_client)
        # NOTE: We need to ensure the ssh key has been injected in the
        # guest before we power cycle
        linux_client.validate_authentication()
        disks_before_attach = linux_client.list_disks()

        volume = self.create_volume()
        # Attach the volume
        attachment = self.attach_volume(server, volume)
        waiters.wait_for_volume_resource_status(
            self.volumes_client, attachment['volumeId'], 'in-use')
        disks_after_attach = linux_client.list_disks()
        self.assertGreater(
            len(disks_after_attach),
            len(disks_before_attach))
        host = self.get_host_for_server(server['id'])

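        # NOTE: stopped() stops the libvirt service on the compute host
        # and restarts it when the context exits. With this change the
        # service's container name and start/stop commands are resolved
        # per compute node from the compute yaml described in the commit
        # message.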
        with clients.ServiceManager(host, 'libvirt').stopped():
            # While this call to n-api will return successfully the
            # underlying call to the virt driver will fail as the libvirt
            # service is stopped.
            self.servers_client.detach_volume(server['id'],
                                              attachment['volumeId'])
            waiters.wait_for_volume_resource_status(
                self.volumes_client, attachment['volumeId'], 'in-use')
            disks_after_failed_detach = linux_client.list_disks()
            self.assertEqual(
                len(disks_after_failed_detach), len(disks_after_attach))

        # This will be a successful detach as libvirt is started again
        self.servers_client.detach_volume(server['id'],
                                          attachment['volumeId'])
        waiters.wait_for_volume_resource_status(
            self.volumes_client, attachment['volumeId'], 'available')
        disks_after_detach = linux_client.list_disks()
        self.assertEqual(len(disks_before_attach), len(disks_after_detach))