Refactor UserGenerator

1. In the current implementation, it is possible for the UserGenerator to
   create a tenant in _create_tenant_users() and then fail at user creation,
   without returning the created tenant data. As a result, created tenants don't
   get deleted during cleanup.

   Here we refactor the tenant/user creation code by splitting it into two
   separate methods, both launched via the Broker pattern (sketched below).

   We also remove the run_concurrent() method, which is no longer used.
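
   For reference, here is a minimal sketch of the Broker pattern as used
   below: publish() fills a queue with work items, and a pool of consumer
   threads drains it, each thread keeping a per-thread cache (e.g. its own
   wrapped keystone client). This is a simplified stand-in rather than the
   actual rally.broker module; judging by the unit tests patching
   broker.time.sleep, the real implementation likely runs the publisher and
   the consumers concurrently and polls the queue.

       import collections
       import threading

       def run(publish, consume, workers):
           queue = collections.deque()

           # The publisher fills the queue via queue.append(args).
           publish(queue)

           def worker():
               cache = {}  # per-thread cache, e.g. a wrapped keystone client
               while True:
                   try:
                       args = queue.popleft()
                   except IndexError:
                       return  # queue drained, this consumer is done
                   consume(cache, args)

           consumers = [threading.Thread(target=worker)
                        for _ in range(workers)]
           for thread in consumers:
               thread.start()
           for thread in consumers:
               thread.join()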

2. To fix a security issue, we change the password used for temporary users
   from the hardcoded "password" to a randomly generated UUID.

3. We rename the "concurrent" config option in the user context to
   "resource_management_workers".

Change-Id: I0b7f6b5677bb564c044c93aa1a9aeafc3b9f53cc
Closes-Bug: #1347215
Closes-Bug: #1365758
Mikhail Dubov 2014-09-26 10:18:50 +04:00
parent fa7d92635f
commit fa909ba8e9
6 changed files with 248 additions and 234 deletions

View File

@@ -254,6 +254,20 @@
sla:
max_failure_percent: 0
-
args:
sleep: 0.01
runner:
type: "constant"
times: 1
concurrency: 1
context:
users:
tenants: 30
users_per_tenant: 15
sla:
max_failure_percent: 0
Dummy.dummy_exception:
-
args:

View File

@@ -13,12 +13,16 @@
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.config import cfg
from rally.benchmark.context import base
from rally.benchmark import utils
from rally.benchmark.wrappers import keystone
from rally import broker
from rally import consts
from rally import exceptions
from rally.i18n import _
from rally.objects import endpoint
from rally.openstack.common import log as logging
@@ -29,7 +33,7 @@ from rally import utils as rutils
LOG = logging.getLogger(__name__)
context_opts = [
cfg.IntOpt("concurrent",
cfg.IntOpt("resource_management_workers",
default=30,
help="How many concurrent threads use for serving users "
"context"),
@@ -66,7 +70,7 @@ class UserGenerator(base.Context):
"type": "integer",
"minimum": 1
},
"concurrent": {
"resource_management_workers": {
"type": "integer",
"minimum": 1
},
@@ -86,8 +90,9 @@ class UserGenerator(base.Context):
super(UserGenerator, self).__init__(context)
self.config.setdefault("tenants", 1)
self.config.setdefault("users_per_tenant", 1)
self.config.setdefault("concurrent",
cfg.CONF.users_context.concurrent)
self.config.setdefault(
"resource_management_workers",
cfg.CONF.users_context.resource_management_workers)
self.config.setdefault("project_domain",
cfg.CONF.users_context.project_domain)
self.config.setdefault("user_domain",
@@ -101,49 +106,12 @@ class UserGenerator(base.Context):
# and change a bit logic of populating lists of users
# and tenants
@classmethod
def _create_tenant_users(cls, args):
"""Create tenant with users and their endpoints.
This is suitable for using with pool of threads.
:param args: tuple arguments, for Pool.imap()
:returns: tuple (dict tenant, list users)
"""
admin_endpoint, users_num, project_dom, user_dom, task_id, i = args
users = []
client = keystone.wrap(osclients.Clients(admin_endpoint).keystone())
tenant = client.create_project(
cls.PATTERN_TENANT % {"task_id": task_id, "iter": i}, project_dom)
LOG.debug("Creating %d users for tenant %s" % (users_num, tenant.id))
for user_id in range(users_num):
username = cls.PATTERN_USER % {"tenant_id": tenant.id,
"uid": user_id}
user = client.create_user(username, "password",
"%s@email.me" % username, tenant.id,
user_dom)
user_endpoint = endpoint.Endpoint(client.auth_url, user.name,
"password", tenant.name,
consts.EndpointPermission.USER,
client.region_name,
project_domain_name=project_dom,
user_domain_name=user_dom)
users.append({"id": user.id,
"endpoint": user_endpoint,
"tenant_id": tenant.id})
return ({"id": tenant.id, "name": tenant.name}, users)
@staticmethod
def _remove_associated_networks(admin_endpoint, tenants):
def _remove_associated_networks(self):
"""Delete associated Nova networks from tenants."""
# NOTE(rmk): Ugly hack to deal with the fact that Nova Network
# networks can only be disassociated in an admin context. Discussed
# with boris-42 before taking this approach [LP-Bug #1350517].
clients = osclients.Clients(admin_endpoint)
clients = osclients.Clients(self.endpoint)
if consts.Service.NOVA not in clients.services().values():
return
@@ -154,7 +122,7 @@ class UserGenerator(base.Context):
for network in nova_admin.networks.list():
network_tenant_id = nova_admin.networks.get(network).project_id
for tenant in tenants:
for tenant in self.context["tenants"]:
if tenant["id"] == network_tenant_id:
try:
nova_admin.networks.disassociate(network)
@@ -163,81 +131,128 @@ class UserGenerator(base.Context):
"Exception: %(ex)s" %
{"tenant_id": tenant["id"], "ex": ex})
@classmethod
def _delete_tenants(cls, args):
"""Delete given tenants.
def _create_tenants(self):
threads = self.config["resource_management_workers"]
:param args: tuple arguments, for Pool.imap()
"""
admin_endpoint, tenants = args
cls._remove_associated_networks(admin_endpoint, tenants)
tenants = []
client = keystone.wrap(osclients.Clients(admin_endpoint).keystone())
def publish(queue):
for i in range(self.config["tenants"]):
args = (self.config["project_domain"], self.task["uuid"], i)
queue.append(args)
for tenant in tenants:
try:
client.delete_project(tenant["id"])
except Exception as ex:
LOG.warning("Failed to delete tenant: %(tenant_id)s. "
"Exception: %(ex)s" %
{"tenant_id": tenant["id"], "ex": ex})
def consume(cache, args):
domain, task_id, i = args
if "client" not in cache:
clients = osclients.Clients(self.endpoint)
cache["client"] = keystone.wrap(clients.keystone())
tenant = cache["client"].create_project(
self.PATTERN_TENANT % {"task_id": task_id, "iter": i}, domain)
tenant_dict = {"id": tenant.id, "name": tenant.name}
tenants.append(tenant_dict)
@classmethod
def _delete_users(cls, args):
"""Delete given users.
# NOTE(msdubov): consume() will fill the tenants list in the closure.
broker.run(publish, consume, threads)
return tenants
:param args: tuple arguments, for Pool.imap()
"""
admin_endpoint, users = args
client = keystone.wrap(osclients.Clients(admin_endpoint).keystone())
def _create_users(self):
# NOTE(msdubov): This should be called after _create_tenants().
threads = self.config["resource_management_workers"]
users_per_tenant = self.config["users_per_tenant"]
for user in users:
try:
client.delete_user(user["id"])
except Exception as ex:
LOG.warning("Failed to delete user: %(user_id)s. "
"Exception: %(ex)s" %
{"user_id": user["id"], "ex": ex})
users = []
def publish(queue):
for tenant in self.context["tenants"]:
for user_id in range(users_per_tenant):
username = self.PATTERN_USER % {"tenant_id": tenant["id"],
"uid": user_id}
password = str(uuid.uuid4())
args = (username, password, self.config["project_domain"],
self.config["user_domain"], tenant)
queue.append(args)
def consume(cache, args):
username, password, project_dom, user_dom, tenant = args
if "client" not in cache:
clients = osclients.Clients(self.endpoint)
cache["client"] = keystone.wrap(clients.keystone())
client = cache["client"]
user = client.create_user(username, password,
"%s@email.me" % username,
tenant["id"], user_dom)
user_endpoint = endpoint.Endpoint(
client.auth_url, user.name, password, tenant["name"],
consts.EndpointPermission.USER, client.region_name,
project_domain_name=project_dom, user_domain_name=user_dom)
users.append({"id": user.id,
"endpoint": user_endpoint,
"tenant_id": tenant["id"]})
# NOTE(msdubov): consume() will fill the users list in the closure.
broker.run(publish, consume, threads)
return users
def _delete_tenants(self):
threads = self.config["resource_management_workers"]
self._remove_associated_networks()
def publish(queue):
for tenant in self.context["tenants"]:
queue.append(tenant["id"])
def consume(cache, tenant_id):
if "client" not in cache:
clients = osclients.Clients(self.endpoint)
cache["client"] = keystone.wrap(clients.keystone())
cache["client"].delete_project(tenant_id)
broker.run(publish, consume, threads)
self.context["tenants"] = []
def _delete_users(self):
threads = self.config["resource_management_workers"]
def publish(queue):
for user in self.context["users"]:
queue.append(user["id"])
def consume(cache, user_id):
if "client" not in cache:
clients = osclients.Clients(self.endpoint)
cache["client"] = keystone.wrap(clients.keystone())
cache["client"].delete_user(user_id)
broker.run(publish, consume, threads)
self.context["users"] = []
@rutils.log_task_wrapper(LOG.info, _("Enter context: `users`"))
def setup(self):
"""Create tenants and users, using pool of threads."""
"""Create tenants and users, using the broker pattern."""
threads = self.config["resource_management_workers"]
users_num = self.config["users_per_tenant"]
LOG.debug("Creating %(tenants)d tenants using %(threads)s threads" %
{"tenants": self.config["tenants"], "threads": threads})
self.context["tenants"] = self._create_tenants()
args = [(self.endpoint, users_num, self.config["project_domain"],
self.config["user_domain"], self.task["uuid"], i)
for i in range(self.config["tenants"])]
if len(self.context["tenants"]) < self.config["tenants"]:
raise exceptions.ContextSetupFailure(
ctx_name=self.__ctx_name__,
msg=_("Failed to create the requested number of tenants."))
LOG.debug("Creating %d users using %s threads" % (
users_num * self.config["tenants"], self.config["concurrent"]))
users_num = self.config["users_per_tenant"] * self.config["tenants"]
LOG.debug("Creating %(users)d users using %(threads)s threads" %
{"users": users_num, "threads": threads})
self.context["users"] = self._create_users()
for tenant, users in utils.run_concurrent(
self.config["concurrent"],
UserGenerator,
"_create_tenant_users",
args):
self.context["tenants"].append(tenant)
self.context["users"] += users
if len(self.context["users"]) < users_num:
raise exceptions.ContextSetupFailure(
ctx_name=self.__ctx_name__,
msg=_("Failed to create the requested number of users."))
@rutils.log_task_wrapper(LOG.info, _("Exit context: `users`"))
def cleanup(self):
"""Delete tenants and users, using pool of threads."""
concurrent = self.config["concurrent"]
# Delete users
users_chunks = utils.chunks(self.context["users"], concurrent)
utils.run_concurrent(
concurrent,
UserGenerator,
"_delete_users",
[(self.endpoint, users) for users in users_chunks])
# Delete tenants
tenants_chunks = utils.chunks(self.context["tenants"], concurrent)
utils.run_concurrent(
concurrent,
UserGenerator,
"_delete_tenants",
[(self.endpoint, tenants) for tenants in tenants_chunks])
"""Delete tenants and users, using the broker pattern."""
self._delete_users()
self._delete_tenants()

View File

@@ -15,7 +15,6 @@
import itertools
import logging
import multiprocessing
import time
import traceback
@@ -27,19 +26,6 @@ from rally import exceptions
LOG = logging.getLogger(__name__)
def chunks(data, step):
"""Split collection into chunks.
:param data: collection to split, only list or tuple are allowed
:param step: int chunk size
:returns: list of collection chunks
>>> chunks([1,2,3,4,5,6,7,8,9,10], 3)
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10]]
"""
return [data[x:x + step] for x in xrange(0, len(data), step)]
def resource_is(status):
return lambda resource: resource.status.upper() == status.upper()
@@ -156,30 +142,6 @@ def infinite_run_args_generator(args_func):
yield args_func(i)
def run_concurrent_helper(args):
cls, method, fn_args = args
return getattr(cls, method)(fn_args)
def run_concurrent(concurrent, cls, fn, fn_args):
"""Run given function using pool of threads.
:param concurrent: number of threads in the pool
:param cls: class to be called in the pool
:param fn: class method to be called in the pool
:param fn_args: list of arguments for function fn() in the pool
:returns: iterator from Pool.imap()
"""
pool = multiprocessing.Pool(concurrent)
iterator = pool.imap(run_concurrent_helper,
[(cls, fn, args) for args in fn_args])
pool.close()
pool.join()
return iterator
def check_service_status(client, service_name):
"""Check if given openstack service is enabled and state is up."""
for service in client.services.list():

View File

@@ -215,6 +215,10 @@ class BenchmarkSetupFailure(RallyException):
msg_fmt = _("Unable to setup benchmark: '%(message)s'")
class ContextSetupFailure(RallyException):
msg_fmt = _("Unable to setup context '%(ctx_name)s': '%(msg)s'")
class ValidationError(RallyException):
msg_fmt = _("Validation error: %(message)s")

View File

@@ -13,26 +13,19 @@
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import mock
from rally.benchmark.context import users
from rally.benchmark import utils
from rally import exceptions
from tests.unit import test
run_concurrent = (lambda dummy, cls, f, args: list(
itertools.imap(getattr(cls, f), args)))
@mock.patch.object(utils, "run_concurrent", run_concurrent)
class UserGeneratorTestCase(test.TestCase):
tenants_num = 10
users_per_tenant = 5
users_num = tenants_num * users_per_tenant
concurrent = 10
threads = 10
@property
def context(self):
@@ -41,7 +34,7 @@ class UserGeneratorTestCase(test.TestCase):
"users": {
"tenants": self.tenants_num,
"users_per_tenant": self.users_per_tenant,
"concurrent": self.concurrent,
"resource_management_workers": self.threads,
}
},
"admin": {"endpoint": mock.MagicMock()},
@@ -54,34 +47,13 @@ class UserGeneratorTestCase(test.TestCase):
"rally.benchmark.context.users.osclients")
self.osclients = self.osclients_patcher.start()
self.keystone_wrapper_patcher = mock.patch(
"rally.benchmark.context.users.keystone")
self.keystone_wrapper = self.keystone_wrapper_patcher.start()
self.wrapped_keystone = self.keystone_wrapper.wrap.return_value
def tearDown(self):
self.keystone_wrapper_patcher.stop()
self.osclients_patcher.stop()
super(UserGeneratorTestCase, self).tearDown()
def test_create_tenant_users(self):
users_num = 5
args = (mock.MagicMock(), users_num, 'default', 'default',
'ad325aec-f7b4-4a62-832a-bb718e465bb7', 1)
result = users.UserGenerator._create_tenant_users(args)
self.assertEqual(len(result), 2)
tenant, users_ = result
self.assertIn("id", tenant)
self.assertIn("name", tenant)
self.assertEqual(len(users_), users_num)
for user in users_:
self.assertIn("id", user)
self.assertIn("endpoint", user)
@mock.patch("rally.benchmark.utils.check_service_status",
return_value=True)
def test_remove_associated_networks(self, mock_check_service_status):
def test__remove_associated_networks(self, mock_check_service_status):
def fake_get_network(req_network):
for network in networks:
if network.project_id == req_network.project_id:
@@ -91,7 +63,6 @@ class UserGeneratorTestCase(test.TestCase):
tenant2 = {'id': 4}
networks = [mock.MagicMock(project_id=1),
mock.MagicMock(project_id=2)]
admin_endpoint, tenants = (mock.MagicMock(), [tenant1, tenant2])
nova_admin = mock.MagicMock()
clients = mock.MagicMock()
self.osclients.Clients.return_value = clients
@@ -99,15 +70,19 @@ class UserGeneratorTestCase(test.TestCase):
clients.nova.return_value = nova_admin
nova_admin.networks.list.return_value = networks
nova_admin.networks.get = fake_get_network
users.UserGenerator._remove_associated_networks(admin_endpoint,
tenants)
context = {"admin": {"endpoint": mock.MagicMock()},
"task": mock.MagicMock()}
user_generator = users.UserGenerator(context)
user_generator.context["tenants"] = [tenant1, tenant2]
user_generator._remove_associated_networks()
mock_check_service_status.assert_called_once_with(mock.ANY,
'nova-network')
nova_admin.networks.disassociate.assert_called_once_with(networks[0])
@mock.patch("rally.benchmark.utils.check_service_status",
return_value=True)
def test_remove_associated_networks_fails(self, mock_check_service_status):
def test__remove_associated_networks_failure(self,
mock_check_service_status):
def fake_get_network(req_network):
for network in networks:
if network.project_id == req_network.project_id:
@@ -117,7 +92,6 @@ class UserGeneratorTestCase(test.TestCase):
tenant2 = {'id': 4}
networks = [mock.MagicMock(project_id=1),
mock.MagicMock(project_id=2)]
admin_endpoint, tenants = (mock.MagicMock(), [tenant1, tenant2])
nova_admin = mock.MagicMock()
clients = mock.MagicMock()
self.osclients.Clients.return_value = clients
@@ -126,61 +100,123 @@ class UserGeneratorTestCase(test.TestCase):
nova_admin.networks.list.return_value = networks
nova_admin.networks.get = fake_get_network
nova_admin.networks.disassociate.side_effect = Exception()
users.UserGenerator._remove_associated_networks(admin_endpoint,
tenants)
context = {"admin": {"endpoint": mock.MagicMock()},
"task": mock.MagicMock()}
user_generator = users.UserGenerator(context)
user_generator.context["tenants"] = [tenant1, tenant2]
user_generator._remove_associated_networks()
mock_check_service_status.assert_called_once_with(mock.ANY,
'nova-network')
nova_admin.networks.disassociate.assert_called_once_with(networks[0])
def test_delete_tenants(self):
@mock.patch("rally.benchmark.context.users.broker.time.sleep")
@mock.patch("rally.benchmark.context.users.keystone")
def test__create_tenants(self, mock_keystone, mock_sleep):
context = {"admin": {"endpoint": mock.MagicMock()},
"task": mock.MagicMock()}
user_generator = users.UserGenerator(context)
user_generator.config["tenants"] = 2
tenants = user_generator._create_tenants()
self.assertEqual(2, len(tenants))
for tenant in tenants:
self.assertIn("id", tenant)
self.assertIn("name", tenant)
@mock.patch("rally.benchmark.context.users.broker.time.sleep")
@mock.patch("rally.benchmark.context.users.keystone")
def test__create_users(self, mock_keystone, mock_sleep):
context = {"admin": {"endpoint": mock.MagicMock()},
"task": mock.MagicMock()}
user_generator = users.UserGenerator(context)
tenant1 = mock.MagicMock()
tenant2 = mock.MagicMock()
args = (mock.MagicMock(), [tenant1, tenant2])
users.UserGenerator._delete_tenants(args)
self.assertEqual(1, self.keystone_wrapper.wrap.call_count)
self.wrapped_keystone.delete_project.assert_has_calls([
mock.call(tenant1["id"]),
mock.call(tenant2["id"])])
user_generator.context["tenants"] = [tenant1, tenant2]
user_generator.config["users_per_tenant"] = 2
users_ = user_generator._create_users()
self.assertEqual(4, len(users_))
for user in users_:
self.assertIn("id", user)
self.assertIn("endpoint", user)
def test_delete_users(self):
@mock.patch("rally.benchmark.context.users.keystone")
def test__delete_tenants(self, mock_keystone):
context = {"admin": {"endpoint": mock.MagicMock()},
"task": mock.MagicMock()}
user_generator = users.UserGenerator(context)
tenant1 = mock.MagicMock()
tenant2 = mock.MagicMock()
user_generator.context["tenants"] = [tenant1, tenant2]
user_generator._delete_tenants()
self.assertEqual(len(user_generator.context["tenants"]), 0)
@mock.patch("rally.benchmark.context.users.keystone")
def test__delete_tenants_failure(self, mock_keystone):
wrapped_keystone = mock_keystone.wrap.return_value
wrapped_keystone.delete_project.side_effect = Exception()
context = {"admin": {"endpoint": mock.MagicMock()},
"task": mock.MagicMock()}
user_generator = users.UserGenerator(context)
tenant1 = mock.MagicMock()
tenant2 = mock.MagicMock()
user_generator.context["tenants"] = [tenant1, tenant2]
user_generator._delete_tenants()
self.assertEqual(len(user_generator.context["tenants"]), 0)
@mock.patch("rally.benchmark.context.users.keystone")
def test__delete_users(self, mock_keystone):
context = {"admin": {"endpoint": mock.MagicMock()},
"task": mock.MagicMock()}
user_generator = users.UserGenerator(context)
user1 = mock.MagicMock()
user2 = mock.MagicMock()
args = (mock.MagicMock(), [user1, user2])
users.UserGenerator._delete_users(args)
self.wrapped_keystone.delete_user.assert_has_calls([
mock.call(user1["id"]),
mock.call(user2["id"])])
user_generator.context["users"] = [user1, user2]
user_generator._delete_users()
self.assertEqual(len(user_generator.context["users"]), 0)
def test_setup_and_cleanup(self):
@mock.patch("rally.benchmark.context.users.keystone")
def test__delete_users_failure(self, mock_keystone):
wrapped_keystone = mock_keystone.wrap.return_value
wrapped_keystone.delete_user.side_effect = Exception()
context = {"admin": {"endpoint": mock.MagicMock()},
"task": mock.MagicMock()}
user_generator = users.UserGenerator(context)
user1 = mock.MagicMock()
user2 = mock.MagicMock()
user_generator.context["users"] = [user1, user2]
user_generator._delete_users()
self.assertEqual(len(user_generator.context["users"]), 0)
@mock.patch("rally.benchmark.context.users.keystone")
def test_setup_and_cleanup(self, mock_keystone):
wrapped_keystone = mock.MagicMock()
mock_keystone.wrap.return_value = wrapped_keystone
with users.UserGenerator(self.context) as ctx:
self.assertEqual(self.wrapped_keystone.create_user.call_count, 0)
self.assertEqual(self.wrapped_keystone.create_project.call_count,
0)
ctx.setup()
self.assertEqual(len(ctx.context["users"]),
self.users_num)
self.assertEqual(self.wrapped_keystone.create_user.call_count,
self.users_num)
self.assertEqual(len(ctx.context["tenants"]),
self.tenants_num)
self.assertEqual(self.wrapped_keystone.create_project.call_count,
self.tenants_num)
# Assert nothing is deleted yet
self.assertEqual(self.wrapped_keystone.delete_user.call_count,
0)
self.assertEqual(self.wrapped_keystone.delete_project.call_count,
0)
# Cleanup (called by context manager)
self.assertEqual(self.wrapped_keystone.delete_user.call_count,
self.users_num)
self.assertEqual(self.wrapped_keystone.delete_project.call_count,
self.tenants_num)
self.assertEqual(len(ctx.context["users"]), 0)
self.assertEqual(len(ctx.context["tenants"]), 0)
def test_users_and_tenants_in_context(self):
@mock.patch("rally.benchmark.context.users.keystone")
def test_setup_and_cleanup_failure(self, mock_keystone):
wrapped_keystone = mock_keystone.wrap.return_value
wrapped_keystone.create_user.side_effect = Exception()
with users.UserGenerator(self.context) as ctx:
self.assertRaises(exceptions.ContextSetupFailure, ctx.setup)
# Ensure that tenants get deleted anyway
self.assertEqual(len(ctx.context["tenants"]), 0)
@mock.patch("rally.benchmark.context.users.keystone")
def test_users_and_tenants_in_context(self, mock_keystone):
wrapped_keystone = mock.MagicMock()
mock_keystone.wrap.return_value = wrapped_keystone
task = {"uuid": "abcdef"}
config = {
@@ -188,7 +224,7 @@ class UserGeneratorTestCase(test.TestCase):
"users": {
"tenants": 2,
"users_per_tenant": 2,
"concurrent": 1
"resource_management_workers": 1
}
},
"admin": {"endpoint": mock.MagicMock()},
@@ -197,7 +233,7 @@ class UserGeneratorTestCase(test.TestCase):
user_list = [mock.MagicMock(id='id_%d' % i)
for i in range(self.users_num)]
self.wrapped_keystone.create_user.side_effect = user_list
wrapped_keystone.create_user.side_effect = user_list
with users.UserGenerator(config) as ctx:
ctx.setup()
@@ -209,9 +245,6 @@ class UserGeneratorTestCase(test.TestCase):
mock.call(pattern % {"task_id": task["uuid"], "iter": i},
ctx.config["project_domain"]))
self.wrapped_keystone.create_project.assert_has_calls(
create_tenant_calls, any_order=True)
for user in ctx.context["users"]:
self.assertEqual(set(["id", "endpoint", "tenant_id"]),
set(user.keys()))

View File

@@ -25,14 +25,6 @@ from tests.unit import test
class BenchmarkUtilsTestCase(test.TestCase):
def test_chunks(self):
data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
self.assertEqual(utils.chunks(data, 3),
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
self.assertEqual(utils.chunks(data, 5),
[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12]])
def test_wait_for_delete(self):
def update_resource(self):
raise exceptions.GetResourceNotFound()
@@ -134,12 +126,6 @@ class BenchmarkUtilsTestCase(test.TestCase):
self.assertRaises(exceptions.GetResourceFailure,
get_from_manager, resource)
def test_run_concurrent_helper(self):
cls = mock.MagicMock()
args = (cls, "test", {})
result = utils.run_concurrent_helper(args)
self.assertEqual(cls.test(), result)
def test_check_service_status(self):
class service():
def __init__(self, name):