Refactor generic cleanup mechanism part 3
The current cleanup mechanism is awful. It is hardcoded, does not retry on failures, contains a lot of mistakes, and in some places it is too fast (e.g. deletion of VMs) while in others it is too slow (e.g. deletion of users). There is also a mix of the generic cleanup mechanism and the resource cleanup mechanism. To resolve all of these issues this patch introduces a cleanup engine.

The engine is resource based: to add a new resource you just make a subclass of base.Resource and, if needed, override some of its methods (list, delete, is_deleted), and that's all. All the complexity of managing:

0) waiting until async deletion is finished
1) retry-on-failure logic
2) graceful failure handling
3) parallelization
4) plugin support

is hidden deep inside the cleanup engine. As a bonus, we are now able to clean up a single resource (without cleaning up the whole service).

PART 3:
-------

*) Refactor all contexts' cleanup methods to use the new generic cleanup engine instead of cleanup.utils
*) Remove the obsolete cleanup.utils
*) Fix all tests

bp benchmark-context-cleanup-refactor

Change-Id: I70557e6ebb56bbe565792d9ee854d3e78428a881
parent 8003bf8c0f
commit 64d34ae594
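The commit message describes the plugin pattern but the diff below only shows the callers being switched to resource_manager.cleanup(). For readers unfamiliar with the engine, here is a minimal sketch of what such a resource plugin could look like, assuming a base.Resource class with overridable list/delete/is_deleted hooks as described above. The module path, decorator and helper attributes (_manager(), raw_resource, id()) are illustrative assumptions, not code taken from this patch.

# Illustrative sketch only: names and helpers below are assumptions based on
# the commit message, not code from this patch.
from rally.benchmark.context.cleanup import base


@base.resource("nova", "servers")
class NovaServer(base.Resource):
    """Hypothetical cleanup plugin for Nova servers.

    The engine discovers plugins like this one and handles retries,
    parallelism and waiting for asynchronous deletion on its own.
    """

    def list(self):
        # Everything returned here is a candidate for deletion.
        return self._manager().list()

    def delete(self):
        # Only trigger deletion; the engine repeats this call on failure.
        self.raw_resource.delete()

    def is_deleted(self):
        # Polled by the engine until the server is actually gone, so the
        # "wait until async deletion is finished" logic stays out of plugins.
        try:
            self._manager().get(self.id())
            return False
        except Exception:
            return True

With plugins shaped like this, a context only has to name what it created, e.g. resource_manager.cleanup(names=["nova.servers"], users=self.context.get("users", [])), which is exactly the pattern the contexts in the diff below are switched to.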
@@ -11,15 +11,24 @@ Rally should delete in any case all resources that it created during benchmark.
Problem Description
-------------------

* Deletion rate limit
* (implemented) Deletion rate limit

  You can kill cloud by deleting too many objects simultaneously, so deletion
  rate limit is required

* Retry on failures
* (implemented) Retry on failures

  There should be few attempts to delete resource in case of failures

* (implemented) Log resources that failed to be deleted

  We should log warnings about all non deleted resources. This information
  should include UUID of resource, it's type and project.

* (implemented) Pluggable

  It should be simple to add new cleanups adding just plugins somewhere.

* Disaster recovery

  Rally should use special name patterns, to be able to delete resources
@@ -1,265 +0,0 @@
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from neutronclient.common import exceptions as neutron_exceptions

from rally.benchmark.scenarios.keystone import utils as kutils
from rally.benchmark import utils as bench_utils
from rally.benchmark.wrappers import keystone as keystone_wrapper

LOG = logging.getLogger(__name__)


def delete_cinder_resources(cinder):
    delete_volume_transfers(cinder)
    delete_volumes(cinder)
    delete_volume_snapshots(cinder)
    delete_volume_backups(cinder)


def delete_glance_resources(glance, project_uuid):
    delete_images(glance, project_uuid)


def delete_heat_resources(heat):
    delete_stacks(heat)


def delete_admin_quotas(client, tenants):
    for tenant in tenants:
        delete_quotas(client, tenant["id"])


def delete_keystone_resources(keystone):
    keystone = keystone_wrapper.wrap(keystone)
    for resource in ["user", "project", "service", "role"]:
        _delete_single_keystone_resource_type(keystone, resource)


def _delete_single_keystone_resource_type(keystone, resource_name):
    for resource in getattr(keystone, "list_%ss" % resource_name)():
        if kutils.is_temporary(resource):
            getattr(keystone, "delete_%s" % resource_name)(resource.id)


def delete_images(glance, project_uuid):
    for image in glance.images.list(owner=project_uuid):
        image.delete()
    _wait_for_list_statuses(glance.images, statuses=["DELETED"],
                            list_query={'owner': project_uuid},
                            timeout=600, check_interval=3)


def delete_quotas(admin_clients, project_uuid):
    admin_clients.nova().quotas.delete(project_uuid)
    admin_clients.cinder().quotas.delete(project_uuid)


def delete_stacks(heat):
    for stack in heat.stacks.list():
        stack.delete()
    _wait_for_list_statuses(heat.stacks, statuses=["DELETE_COMPLETE"],
                            timeout=600, check_interval=3)


def delete_volumes(cinder):
    for vol in cinder.volumes.list():
        vol.delete()
    _wait_for_empty_list(cinder.volumes, timeout=120)


def delete_volume_transfers(cinder):
    for transfer in cinder.transfers.list():
        transfer.delete()
    _wait_for_empty_list(cinder.transfers)


def delete_volume_snapshots(cinder):
    for snapshot in cinder.volume_snapshots.list():
        snapshot.delete()
    _wait_for_empty_list(cinder.volume_snapshots, timeout=240)


def delete_volume_backups(cinder):
    for backup in cinder.backups.list():
        backup.delete()
    _wait_for_empty_list(cinder.backups, timeout=240)


def delete_nova_resources(nova):
    delete_servers(nova)
    delete_keypairs(nova)
    delete_secgroups(nova)


def delete_secgroups(nova):
    for secgroup in nova.security_groups.list():
        if secgroup.name != "default":  # inc0: we shouldn't mess with default
            secgroup.delete()


def delete_servers(nova):
    for server in nova.servers.list():
        server.delete()
    _wait_for_empty_list(nova.servers, timeout=600, check_interval=3)


def delete_keypairs(nova):
    for keypair in nova.keypairs.list():
        keypair.delete()
    _wait_for_empty_list(nova.keypairs)


def delete_neutron_resources(neutron, project_uuid):
    search_opts = {"tenant_id": project_uuid}
    # Ports
    for port in neutron.list_ports(**search_opts)["ports"]:
        # Detach routers
        if port["device_owner"] == "network:router_interface":
            neutron.remove_interface_router(
                port["device_id"], {
                    "port_id": port["id"]
                })
        else:
            try:
                neutron.delete_port(port["id"])
            except neutron_exceptions.PortNotFoundClient:
                # Port can be already auto-deleted, skip silently
                pass
    # Routers
    for router in neutron.list_routers(**search_opts)["routers"]:
        neutron.delete_router(router["id"])

    # Subnets
    for subnet in neutron.list_subnets(**search_opts)["subnets"]:
        neutron.delete_subnet(subnet["id"])

    # Networks
    for network in neutron.list_networks(**search_opts)["networks"]:
        neutron.delete_network(network["id"])


def delete_designate_resources(designate):
    for domain in designate.domains.list():
        designate.domains.delete(domain.id)


def delete_ceilometer_resources(ceilometer, project_uuid):
    delete_alarms(ceilometer, project_uuid)


def delete_alarms(ceilometer, project_uuid):
    alarms = ceilometer.alarms.list(q=[{"field": "project_id",
                                        "op": "eq",
                                        "value": project_uuid}])
    for alarm in alarms:
        ceilometer.alarms.delete(alarm.alarm_id)


def delete_sahara_resources(sahara):
    # Delete EDP related objects
    delete_job_executions(sahara)
    delete_jobs(sahara)
    delete_job_binary_internals(sahara)
    delete_job_binaries(sahara)
    delete_data_sources(sahara)

    # Delete cluster related objects
    delete_clusters(sahara)
    delete_cluster_templates(sahara)
    delete_node_group_templates(sahara)


def delete_job_executions(sahara):
    for je in sahara.job_executions.list():
        sahara.job_executions.delete(je.id)

    _wait_for_empty_list(sahara.job_executions)


def delete_jobs(sahara):
    for job in sahara.jobs.list():
        sahara.jobs.delete(job.id)


def delete_job_binary_internals(sahara):
    for jbi in sahara.job_binary_internals.list():
        sahara.job_binary_internals.delete(jbi.id)


def delete_job_binaries(sahara):
    for jb in sahara.job_binaries.list():
        sahara.job_binaries.delete(jb.id)


def delete_data_sources(sahara):
    for ds in sahara.data_sources.list():
        sahara.data_sources.delete(ds.id)


def delete_clusters(sahara):
    for cluster in sahara.clusters.list():
        sahara.clusters.delete(cluster.id)

    _wait_for_empty_list(sahara.clusters)


def delete_cluster_templates(sahara):
    for ct in sahara.cluster_templates.list():
        sahara.cluster_templates.delete(ct.id)


def delete_node_group_templates(sahara):
    for ngt in sahara.node_group_templates.list():
        sahara.node_group_templates.delete(ngt.id)


def delete_zaqar_resources(zaqar):
    # delete messages
    for queue in zaqar.queues.list():
        for msg in queue.messages.list():
            msg.delete()

    # delete queues
    for queue in zaqar.queues.list():
        queue.delete()


def _wait_for_empty_list(mgr, timeout=10, check_interval=1):
    _wait_for_list_size(mgr, sizes=[0], timeout=timeout,
                        check_interval=check_interval)


def _wait_for_list_size(mgr, sizes=[0], timeout=10, check_interval=1):
    bench_utils.wait_for(mgr, is_ready=bench_utils.manager_list_size(sizes),
                         update_resource=None, timeout=timeout,
                         check_interval=check_interval)


def _wait_for_list_statuses(mgr, statuses, list_query=None,
                            timeout=10, check_interval=1):
    list_query = list_query or {}

    def _list_statuses(mgr):
        for resource in mgr.list(**list_query):
            status = bench_utils.get_status(resource)
            if status not in statuses:
                return False
        return True

    bench_utils.wait_for(mgr, is_ready=_list_statuses, update_resource=None,
                         timeout=timeout, check_interval=check_interval)
@@ -15,10 +15,11 @@
import six

from rally.benchmark.context import base
from rally.benchmark.context.cleanup import utils as cleanup_utils
from rally.benchmark.context.cleanup import manager as resource_manager
from rally.benchmark.scenarios import base as scenario_base
from rally.benchmark.scenarios.glance import utils as glance_utils
from rally import exceptions
from rally.i18n import _
from rally.openstack.common import log as logging
from rally import osclients
from rally import utils as rutils
@@ -93,13 +94,9 @@ class ImageGenerator(base.Context):

    @rutils.log_task_wrapper(LOG.info, _("Exit context: `Images`"))
    def cleanup(self):
        for images in self.context["images"]:
            try:
                glance = osclients.Clients(images["endpoint"]).glance()
                cleanup_utils.delete_glance_resources(glance,
                                                      images["tenant_id"])
            except Exception:
                raise exceptions.ImageCleanUpException()
        # TODO(boris-42): Delete only resources created by this context
        resource_manager.cleanup(names=["glance.images"],
                                 users=self.context.get("users", []))

    @classmethod
    def validate_semantic(cls, config, admin, users, task):
@@ -14,9 +14,9 @@
# under the License.

import novaclient.exceptions
import six

from rally.benchmark.context import base
from rally.benchmark.context.cleanup import manager as resource_manager
from rally.i18n import _
from rally.openstack.common import log as logging
from rally import osclients
@@ -33,23 +33,17 @@ class Keypair(base.Context):

    KEYPAIR_NAME = "rally_ssh_key"

    def _get_nova_client(self, endpoint):
        return osclients.Clients(endpoint).nova()

    def _keypair_safe_remove(self, nova):
        try:
            nova.keypairs.delete(self.KEYPAIR_NAME)
        except novaclient.exceptions.NotFound:
            pass

    def _generate_keypair(self, endpoint):
        nova = self._get_nova_client(endpoint)
        nova_client = osclients.Clients(endpoint).nova()

        # NOTE(hughsaunders): If keypair exists, it must be deleted as we can't
        # retrieve the private key
        self._keypair_safe_remove(nova)
        try:
            nova_client.keypairs.delete(self.KEYPAIR_NAME)
        except novaclient.exceptions.NotFound:
            pass

        keypair = nova.keypairs.create(self.KEYPAIR_NAME)
        keypair = nova_client.keypairs.create(self.KEYPAIR_NAME)
        return {"private": keypair.private_key,
                "public": keypair.public_key}

@@ -61,15 +55,6 @@ class Keypair(base.Context):

    @utils.log_task_wrapper(LOG.info, _("Exit context: `keypair`"))
    def cleanup(self):
        for user in self.context["users"]:
            endpoint = user['endpoint']
            try:
                nova = self._get_nova_client(endpoint)
                self._keypair_safe_remove(nova)
            except Exception as e:
                LOG.warning("Unable to delete keypair: %(kpname)s for user "
                            "%(tenant)s/%(user)s: %(message)s"
                            % {'kpname': self.KEYPAIR_NAME,
                               'tenant': endpoint.tenant_name,
                               'user': endpoint.username,
                               'message': six.text_type(e)})
        # TODO(boris-42): Delete only resources created by this context
        resource_manager.cleanup(names=["nova.keypairs"],
                                 users=self.context.get("users", []))
@@ -16,11 +16,12 @@
from oslo.config import cfg

from rally.benchmark.context import base
from rally.benchmark.context.cleanup import utils as cleanup_utils
from rally.benchmark.context.cleanup import manager as resource_manager
from rally.benchmark.scenarios.sahara import utils
from rally.benchmark import types
from rally.benchmark import utils as bench_utils
from rally import exceptions
from rally.i18n import _
from rally.openstack.common import log as logging
from rally import osclients
from rally import utils as rutils
@@ -164,11 +165,7 @@ class SaharaCluster(base.Context):

    @rutils.log_task_wrapper(LOG.info, _("Exit context: `Sahara Cluster`"))
    def cleanup(self):
        clean_tenants = set()
        for user in self.context.get("users", []):
            tenant_id = user["tenant_id"]
            if tenant_id not in clean_tenants:
                clean_tenants.add(tenant_id)

                sahara = osclients.Clients(user["endpoint"]).sahara()
                cleanup_utils.delete_clusters(sahara)
        # TODO(boris-42): Delete only resources created by this context
        resource_manager.cleanup(names=["sahara.clusters"],
                                 users=self.context.get("users", []))
@@ -16,8 +16,9 @@
import urllib2

from rally.benchmark.context import base
from rally.benchmark.context.cleanup import utils as cleanup_utils
from rally.benchmark.context.cleanup import manager as resource_manager
from rally import exceptions
from rally.i18n import _
from rally.openstack.common import log as logging
from rally import osclients
from rally import utils as rutils
@@ -167,16 +168,10 @@ class SaharaEDP(base.Context):

    @rutils.log_task_wrapper(LOG.info, _("Exit context: `Sahara EDP`"))
    def cleanup(self):
        clean_tenants = set()
        for user in self.context.get("users", []):
            tenant_id = user["tenant_id"]
            if tenant_id not in clean_tenants:
                clean_tenants.add(tenant_id)
        resources = ["job_executions", "jobs", "job_binary_internals",
                     "job_binaries", "data_sources"]

                sahara = osclients.Clients(user["endpoint"]).sahara()

                cleanup_utils.delete_job_executions(sahara)
                cleanup_utils.delete_jobs(sahara)
                cleanup_utils.delete_job_binary_internals(sahara)
                cleanup_utils.delete_job_binaries(sahara)
                cleanup_utils.delete_data_sources(sahara)
        # TODO(boris-42): Delete only resources created by this context
        resource_manager.cleanup(
            names=map(lambda r: "sahara.%s" % r, resources),
            users=self.context.get("users", []))
@@ -13,10 +13,10 @@
# under the License.

from rally.benchmark.context import base
from rally.benchmark.context.cleanup import utils as cleanup_utils
from rally.benchmark.context.cleanup import manager as resource_manager
from rally.benchmark.scenarios import base as scenarios_base
from rally.benchmark.scenarios.glance import utils as glance_utils
from rally import exceptions
from rally.i18n import _
from rally.openstack.common import log as logging
from rally import osclients
from rally import utils as rutils
@@ -91,16 +91,7 @@ class SaharaImage(base.Context):

    @rutils.log_task_wrapper(LOG.info, _("Exit context: `Sahara Image`"))
    def cleanup(self):
        clean_tenants = set([])
        for user in self.context.get("users", []):
            tenant_id = user["tenant_id"]
            if tenant_id not in clean_tenants:
                clean_tenants.add(tenant_id)

                try:
                    glance = osclients.Clients(user["endpoint"]).glance()
                    cleanup_utils.delete_glance_resources(glance,
                                                          user["tenant_id"])
                except Exception as e:
                    LOG.error(e)
                    raise exceptions.ImageCleanUpException()
        # TODO(boris-42): Delete only resources created by this context
        resource_manager.cleanup(names=["glance.images"],
                                 users=self.context.get("users", []))
@@ -13,8 +13,9 @@
# under the License.

from rally.benchmark.context import base
from rally.benchmark.context.cleanup import utils as cleanup_utils
from rally.benchmark.context.cleanup import manager as resource_manager
from rally.benchmark.scenarios.cinder import utils as cinder_utils
from rally.i18n import _
from rally.openstack.common import log as logging
from rally import osclients
from rally import utils as rutils
@@ -65,14 +66,6 @@ class VolumeGenerator(base.Context):

    @rutils.log_task_wrapper(LOG.info, _("Exit context: `Volumes`"))
    def cleanup(self):
        for volume in self.context["volumes"]:
            try:
                cinder = osclients.Clients(volume["endpoint"]).cinder()
                cleanup_utils.delete_cinder_resources(cinder)
            except Exception as ex:
                LOG.warning("Failed to remove volume: %(volume_id)s for user "
                            "%(tenant)s/%(user)s. Exception: %(ex)s" %
                            {"volume_id": volume["volume_id"],
                             "tenant": volume["endpoint"].tenant_name,
                             "user": volume["endpoint"].username,
                             "ex": ex})
        # TODO(boris-42): Delete only resources created by this context
        resource_manager.cleanup(names=["cinder.volumes"],
                                 users=self.context.get("users", []))
@@ -1,162 +0,0 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import mock

from rally.benchmark.context.cleanup import utils
from rally.benchmark import scenarios
from tests.unit import fakes
from tests.unit import test


class CleanupUtilsTestCase(test.TestCase):

    def test_delete_neutron_resources(self):
        neutron = fakes.FakeClients().neutron()
        scenario = scenarios.neutron.utils.NeutronScenario()
        scenario.context = mock.Mock(return_value={"iteration": 1})
        scenario.clients = lambda ins: neutron

        network1 = scenario._create_network({})
        subnet1 = scenario._create_subnet(network1, 1, {})
        router1 = scenario._create_router({})
        # This also creates a port
        neutron.add_interface_router(router1["router"]["id"],
                                     {"subnet_id": subnet1["subnet"]["id"]})
        network2 = scenario._create_network({})
        scenario._create_subnet(network2, 1, {})
        scenario._create_router({})
        scenario._create_port(network2, {})

        total = lambda neutron: (len(neutron.list_networks()["networks"])
                                 + len(neutron.list_subnets()["subnets"])
                                 + len(neutron.list_routers()["routers"])
                                 + len(neutron.list_ports()["ports"]))

        self.assertEqual(total(neutron), 8)

        utils.delete_neutron_resources(neutron,
                                       network1["network"]["tenant_id"])

        self.assertEqual(total(neutron), 0)

    def test_delete_sahara_resources(self):

        sahara = fakes.FakeClients().sahara()
        utils.delete_sahara_resources(sahara)

        sahara.job_executions.delete.assert_called_once_with(42)
        sahara.jobs.delete.assert_called_once_with(42)
        sahara.job_binary_internals.delete.assert_called_once_with(42)
        sahara.job_binaries.delete.assert_called_once_with(42)
        sahara.data_sources.delete.assert_called_once_with(42)

        sahara.clusters.delete.assert_called_once_with(42)
        sahara.cluster_templates.delete.assert_called_once_with(42)
        sahara.node_group_templates.delete.assert_called_once_with(42)

    def test_delete_cinder_resources(self):
        cinder = fakes.FakeClients().cinder()
        scenario = scenarios.cinder.utils.CinderScenario()
        scenario.clients = lambda ins: cinder
        vol1 = scenario._create_volume(1)
        scenario._create_snapshot(vol1.id)
        cinder.transfers.create("dummy")
        cinder.backups.create("dummy")

        total = lambda cinder: (len(cinder.volumes.list())
                                + len(cinder.volume_snapshots.list())
                                + len(cinder.transfers.list())
                                + len(cinder.backups.list()))
        self.assertEqual(total(cinder), 4)
        utils.delete_cinder_resources(cinder)
        self.assertEqual(total(cinder), 0)

    def test_delete_nova_resources(self):
        nova = fakes.FakeClients().nova()
        nova.servers.create("dummy", None, None)
        nova.keypairs.create("dummy")
        nova.security_groups.create("dummy")
        total = lambda nova: (len(nova.servers.list())
                              + len(nova.keypairs.list())
                              + len(nova.security_groups.list()))
        self.assertEqual(total(nova), 4)
        utils.delete_nova_resources(nova)
        self.assertEqual(total(nova), 1)

    def test_delete_heat_resources(self):
        heat = fakes.FakeClients().heat()
        heat.stacks.create("dummy")
        total = lambda heat: (len(heat.stacks.list()))
        self.assertEqual(total(heat), 1)
        utils.delete_heat_resources(heat)
        self.assertEqual(total(heat), 0)

    def test_delete_designate_resources(self):
        designate = fakes.FakeClients().designate()
        designate.domains.create("dummy")
        total = lambda designate: (len(designate.domains.list()))
        self.assertEqual(total(designate), 1)
        utils.delete_designate_resources(designate)
        self.assertEqual(total(designate), 0)

    def test_delete_ceilometer_resources(self):
        ceilometer = fakes.FakeClients().ceilometer()
        ceilometer.alarms.create()
        total = lambda ceilometer: (len(ceilometer.alarms.list()))
        self.assertEqual(total(ceilometer), 1)
        utils.delete_ceilometer_resources(ceilometer, "dummy")
        self.assertEqual(total(ceilometer), 0)

    def test_delete_admin_quotas(self):
        tenant1 = {'id': 1}
        tenant2 = {'id': 2}
        client = fakes.FakeClients()
        utils.delete_admin_quotas(client, [tenant1, tenant2])
        self.assertFalse(client.nova().quotas.list())
        self.assertFalse(client.cinder().quotas.list())

    @mock.patch('rally.benchmark.wrappers.keystone.wrap')
    def test_delete_keystone_resources(self, mock_wrap):
        keystone = fakes.FakeClients().keystone()
        mock_wrap.return_value = keystone
        keystone.users.create("rally_keystone_dummy", None, None, None)
        total = lambda keystone: (len(keystone.users.list()))
        self.assertEqual(total(keystone), 1)
        utils.delete_keystone_resources(keystone)
        self.assertEqual(total(keystone), 0)

    def test_delete_glance_resources(self):
        glance = fakes.FakeClients().glance()
        glance.images.create("dummy", None, None, None)
        total = lambda glance: (len(glance.images.list()))
        self.assertEqual(total(glance), 1)
        utils.delete_glance_resources(glance, "dummy")
        self.assertEqual(total(glance), 0)

    def test_delete_zaqar_resources(self):
        zaqar = fakes.FakeClients().zaqar()
        messages = [{'body': {'id': idx}, 'ttl': 360} for idx in range(20)]
        queue = zaqar.queue("fizbit")
        queue.post_message(messages)
        messages_no = lambda queue: (len(queue.messages.list()))
        queues_no = lambda zaqar: (len(zaqar.queues.list()))
        self.assertEqual(messages_no(queue), 20)
        self.assertEqual(queues_no(zaqar), 1)
        utils.delete_zaqar_resources(zaqar)
        self.assertEqual(messages_no(queue), 0)
        self.assertEqual(queues_no(zaqar), 0)
@@ -63,12 +63,12 @@ class SaharaClusterTestCase(test.TestCase):
            "sahara_images": self.images
        }

    @mock.patch("%s.sahara_cluster.cleanup_utils" % CTX)
    @mock.patch("%s.sahara_cluster.resource_manager.cleanup" % CTX)
    @mock.patch("%s.sahara_cluster.utils.SaharaScenario._launch_cluster" % CTX,
                return_value=mock.MagicMock(id=42))
    @mock.patch("%s.sahara_cluster.osclients" % CTX)
    def test_setup_and_cleanup(self, mock_osclients,
                               mock_launch, mock_cleanup_utils):
                               mock_launch, mock_cleanup):

        mock_sahara = mock_osclients.Clients(mock.MagicMock()).sahara()

@@ -100,16 +100,13 @@ class SaharaClusterTestCase(test.TestCase):

        mock_launch.assert_has_calls(launch_cluster_calls)
        sahara_ctx.cleanup()
        self.assertEqual(
            self.tenants_num,
            len(mock_cleanup_utils.delete_clusters.mock_calls))
        mock_cleanup.assert_called_once_with(names=["sahara.clusters"],
                                             users=ctx["users"])

    @mock.patch("%s.sahara_cluster.cleanup_utils" % CTX)
    @mock.patch("%s.sahara_cluster.utils.SaharaScenario._launch_cluster" % CTX,
                return_value=mock.MagicMock(id=42))
    @mock.patch("%s.sahara_cluster.osclients" % CTX)
    def test_setup_and_cleanup_error(self, mock_osclients,
                                     mock_launch, mock_cleanup_utils):
    def test_setup_and_cleanup_error(self, mock_osclients, mock_launch):

        mock_sahara = mock_osclients.Clients(mock.MagicMock()).sahara()
@@ -61,11 +61,11 @@ class SaharaEDPTestCase(test.TestCase):
            "users": self.user_key,
        }

    @mock.patch("%s.sahara_edp.cleanup_utils" % CTX)
    @mock.patch("%s.sahara_edp.resource_manager.cleanup" % CTX)
    @mock.patch("%s.sahara_edp.urllib2" % CTX)
    @mock.patch("%s.sahara_edp.osclients" % CTX)
    def test_setup_and_cleanup(self, mock_osclients, mock_urllib,
                               mock_cleanup_utils):
                               mock_cleanup):

        mock_sahara = mock_osclients.Clients(mock.MagicMock()).sahara()
        mock_sahara.data_sources.create.return_value = mock.MagicMock(id=42)
@@ -106,22 +106,9 @@ class SaharaEDPTestCase(test.TestCase):
        mock_sahara.job_binaries.create.assert_has_calls(job_binaries_calls)

        sahara_ctx.cleanup()
        self.assertEqual(
            self.tenants_num,
            len(mock_cleanup_utils.delete_job_executions.mock_calls))

        self.assertEqual(
            self.tenants_num,
            len(mock_cleanup_utils.delete_jobs.mock_calls))

        self.assertEqual(
            self.tenants_num,
            len(mock_cleanup_utils.delete_job_binary_internals.mock_calls))

        self.assertEqual(
            self.tenants_num,
            len(mock_cleanup_utils.delete_job_binaries.mock_calls))

        self.assertEqual(
            self.tenants_num,
            len(mock_cleanup_utils.delete_data_sources.mock_calls))
        mock_cleanup.assert_called_once_with(
            names=["sahara.job_executions", "sahara.jobs",
                   "sahara.job_binary_internals", "sahara.job_binaries",
                   "sahara.data_sources"],
            users=ctx["users"])
@@ -15,9 +15,9 @@
import mock

from rally.benchmark.context.sahara import sahara_image
from rally import exceptions
from tests.unit import test


BASE_CTX = "rally.benchmark.context"
CTX = "rally.benchmark.context.sahara"
SCN = "rally.benchmark.scenarios"
@@ -61,8 +61,8 @@ class SaharaImageTestCase(test.TestCase):
    @mock.patch("%s.glance.utils.GlanceScenario._create_image" % SCN,
                return_value=mock.MagicMock(id=42))
    @mock.patch("%s.sahara_image.osclients" % CTX)
    @mock.patch("%s.cleanup.utils.delete_glance_resources" % BASE_CTX)
    def test_setup_and_cleanup(self, mock_image_remover, mock_osclients,
    @mock.patch("%s.sahara_image.resource_manager.cleanup" % CTX)
    def test_setup_and_cleanup(self, mock_cleanup, mock_osclients,
                               mock_image_generator, mock_uuid):

        ctx = self.context_without_images_key
@@ -97,7 +97,5 @@ class SaharaImageTestCase(test.TestCase):
            sahara_update_tags_calls)

        sahara_ctx.cleanup()
        self.assertEqual(self.tenants_num, len(mock_image_remover.mock_calls))

        mock_image_remover.side_effect = Exception('failed_deletion')
        self.assertRaises(exceptions.ImageCleanUpException, sahara_ctx.cleanup)
        mock_cleanup.assert_called_once_with(names=["glance.images"],
                                             users=ctx["users"])
@@ -102,8 +102,8 @@ class ImageGeneratorTestCase(test.TestCase):
        self.assertEqual(new_context, real_context)

    @mock.patch("%s.images.osclients" % CTX)
    @mock.patch("%s.cleanup.utils.delete_glance_resources" % CTX)
    def test_cleanup(self, mock_image_remover, mock_osclients):
    @mock.patch("%s.images.resource_manager.cleanup" % CTX)
    def test_cleanup(self, mock_cleanup, mock_osclients):
        image_list = ["uuid"] * 5
        image_key = [{'image_id': image_list, 'endpoint': 'endpoint',
                      'tenant_id': i} for i in range(2)]
@@ -135,11 +135,8 @@ class ImageGeneratorTestCase(test.TestCase):

        images_ctx = images.ImageGenerator(context)
        images_ctx.cleanup()

        self.assertEqual(2, len(mock_image_remover.mock_calls))

        mock_image_remover.side_effect = Exception('failed_deletion')
        self.assertRaises(exceptions.ImageCleanUpException, images_ctx.cleanup)
        mock_cleanup.assert_called_once_with(names=["glance.images"],
                                             users=context["users"])

    def test_validate_semantic(self):
        users = [fakes.FakeClients()]
@@ -45,36 +45,19 @@ class KeyPairContextTestCase(test.TestCase):
        keypair_ctx.setup()
        self.assertEqual(self.ctx_without_keys, self.ctx_with_keys)

    @mock.patch('rally.osclients.Clients')
    @mock.patch("%s.keypair.Keypair._keypair_safe_remove" % CTX)
    def test_keypair_cleanup(self, mock_safe_remove, mock_osclients):
    @mock.patch("%s.keypair.resource_manager.cleanup" % CTX)
    def test_keypair_cleanup(self, mock_cleanup):
        keypair_ctx = keypair.Keypair(self.ctx_with_keys)
        keypair_ctx.cleanup()
        mock_clients = mock_osclients.return_value
        mock_nova = mock_clients.nova.return_value
        self.assertEqual(
            [mock.call(mock_nova)]
            * self.users,
            mock_safe_remove.mock_calls
        )
        mock_cleanup.assert_called_once_with(names=["nova.keypairs"],
                                             users=self.ctx_with_keys["users"])

    @mock.patch("%s.keypair.Keypair._keypair_safe_remove" % CTX)
    @mock.patch('rally.osclients.Clients')
    def test_keypair_generate(self, mock_osclients, mock_safe_remove):
    @mock.patch("rally.osclients.Clients")
    def test_keypair_generate(self, mock_osclients):
        keypair_ctx = keypair.Keypair(self.ctx_without_keys)
        keypair_ctx._generate_keypair('endpoint')
        mock_clients = mock_osclients.return_value
        mock_nova = mock_clients.nova.return_value
        self.assertIn(
            mock.call().nova().keypairs.create('rally_ssh_key'),
            mock_osclients.mock_calls
        )
        mock_safe_remove.assert_called_once_with(mock_nova)
        keypair_ctx._generate_keypair("endpoint")

    def test_keypair_safe_remove(self):
        mock_nova = mock.MagicMock()
        keypair_ctx = keypair.Keypair(self.ctx_without_keys)
        keypair_ctx._keypair_safe_remove(mock_nova)
        self.assertEqual(
            [mock.call.delete('rally_ssh_key')],
            mock_nova.keypairs.mock_calls)
        mock_osclients.assert_has_calls([
            mock.call().nova().keypairs.delete("rally_ssh_key"),
            mock.call().nova().keypairs.create("rally_ssh_key"),
        ])
@@ -80,8 +80,8 @@ class VolumeGeneratorTestCase(test.TestCase):
        self.assertEqual(new_context, real_context)

    @mock.patch("%s.volumes.osclients" % CTX)
    @mock.patch("%s.cleanup.utils.delete_cinder_resources" % CTX)
    def test_cleanup(self, mock_cinder_remover, mock_osclients):
    @mock.patch("%s.volumes.resource_manager.cleanup" % CTX)
    def test_cleanup(self, mock_cleanup, mock_osclients):
        ctx_volumes = [
            {'volume_id': 'uuid', 'endpoint': mock.MagicMock(), 'tenant_id': i}
            for i in range(2)]
@@ -111,38 +111,5 @@ class VolumeGeneratorTestCase(test.TestCase):
        volumes_ctx = volumes.VolumeGenerator(context)
        volumes_ctx.cleanup()

        self.assertEqual(2, len(mock_cinder_remover.mock_calls))

    @mock.patch("%s.volumes.osclients" % CTX)
    @mock.patch("%s.cleanup.utils.delete_cinder_resources" % CTX)
    def test_cleanup_exception(self, mock_cinder_remover, mock_osclients):
        ctx_volumes = [
            {'volume_id': 'uuid', 'endpoint': mock.MagicMock(), 'tenant_id': i}
            for i in range(2)]
        user_key = [{'id': i, 'tenant_id': j, 'endpoint': 'endpoint'}
                    for j in range(2)
                    for i in range(5)]

        context = {
            "config": {
                "users": {
                    "tenants": 2,
                    "users_per_tenant": 5,
                    "concurrent": 10,
                },
                "volumes": {
                    "size": 1,
                }
            },
            "admin": {
                "endpoint": mock.MagicMock()
            },
            "task": mock.MagicMock(),
            "users": user_key,
            "volumes": ctx_volumes,
        }

        mock_cinder_remover.side_effect = Exception()
        volumes_ctx = volumes.VolumeGenerator(context)
        volumes_ctx.cleanup()
        self.assertEqual(2, len(mock_cinder_remover.mock_calls))
        mock_cleanup.assert_called_once_with(names=["cinder.volumes"],
                                             users=context["users"])