Cover Rally with docstrings & Test this coverage
Rally should have detailed docstrings for:

 * Benchmark scenario classes
 * Benchmark scenarios
 * Deploy engines
 * Server providers
 * SLA

Here we add such docstrings and also add a test suite that checks that Rally
is 100% covered with docstrings and that these docstrings are correctly
formed.

We also change the interface of the following benchmark scenarios (for the
sake of unification):

 * CinderVolumes.create_and_attach_volume
 * CinderVolumes.create_snapshot_and_attach_volume
 * CinderVolumes.create_nested_snapshots_and_attach_volume

Finally, we slightly refactor NovaServers.boot_and_bounce_server.

Change-Id: Ia38c8fc2d692a09719d3e068d332647d4b0da47f
parent 045603928c
commit 07bbc3094d
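The docstring test suite mentioned in the commit message is not among the hunks
shown below, so as a rough illustration only: a coverage check of that kind
could be sketched like this (the traversal of direct Scenario subclasses and
the helper name are assumptions, not Rally's actual test code):

    import inspect

    from rally.benchmark.scenarios import base


    def check_scenario_docstrings():
        """Fail if a Scenario subclass or a public method lacks a docstring."""
        # Sketch only: walks direct subclasses; the real suite presumably
        # also covers deploy engines, server providers and SLA plugins.
        missing = []
        for cls in base.Scenario.__subclasses__():
            if not inspect.getdoc(cls):
                missing.append(cls.__name__)
            for name, member in inspect.getmembers(cls, inspect.isroutine):
                if not name.startswith("_") and not inspect.getdoc(member):
                    missing.append("%s.%s" % (cls.__name__, name))
        assert not missing, "Missing docstrings: %s" % ", ".join(missing)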
doc/samples
plugins/scenario
tasks/scenarios/cinder
rally-jobs
rally
aas/rest
benchmark/scenarios
authenticate
ceilometer
cinder
designate
dummy
glance
heat
keystone
neutron
nova
quotas
requests
sahara
tempest
vm
zaqar
tests/unit
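As the samples below show, the reworked CinderVolumes scenarios now take a
unified "size" argument instead of "volume_size". A task entry written against
the new interface would look roughly like this (the flavor name and the runner
block are illustrative and not part of the diff):

    ---
      CinderVolumes.create_and_attach_volume:
        -
          args:
            size: 10
            image:
              name: "^cirros.*uec$"
            flavor:
              name: "m1.tiny"
          runner:
            type: "constant"
            times: 5
            concurrency: 1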
@@ -2,7 +2,7 @@ from rally.benchmark.scenarios import base


 class ScenarioPlugin(base.Scenario):
-    """Sample of plugin which lists flavors"""
+    """Sample plugin which lists flavors."""

     @base.atomic_action_timer("list_flavors")
     def _list_flavors(self):
@@ -19,5 +19,6 @@ class ScenarioPlugin(base.Scenario):

     @base.scenario()
     def list_flavors(self):
+        """List flavors."""
         self._list_flavors()
         self._list_flavors_as_admin()
@@ -2,7 +2,7 @@
     "CinderVolumes.create_and_attach_volume": [
         {
             "args": {
-                "volume_size": 10,
+                "size": 10,
                 "image": {
                     "name": "^cirros.*uec$"
                 },
@@ -2,7 +2,7 @@
   CinderVolumes.create_and_attach_volume:
     -
       args:
-        volume_size: 10
+        size: 10
         image:
           name: "^cirros.*uec$"
         flavor:
@@ -3,7 +3,7 @@
         {
             "args": {
                 "volume_type": false,
-                "volume_size": {
+                "size": {
                     "min": 1,
                     "max": 5
                 }
@@ -32,7 +32,7 @@
         {
             "args": {
                 "volume_type": true,
-                "volume_size": {
+                "size": {
                     "min": 1,
                     "max": 5
                 }
@@ -3,7 +3,7 @@
     -
       args:
         volume_type: false
-        volume_size:
+        size:
          min: 1
          max: 5
      runner:
@@ -23,7 +23,7 @@
     -
       args:
         volume_type: true
-        volume_size:
+        size:
          min: 1
          max: 5
      runner:
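For the snapshot scenarios "size" is a dictionary with "min"/"max" bounds
rather than a plain integer. A complete task entry in the new form might look
like this (the runner and context sections are the usual Rally task shape,
shown here only for completeness; their values are illustrative):

    ---
      CinderVolumes.create_snapshot_and_attach_volume:
        -
          args:
            volume_type: false
            size:
              min: 1
              max: 5
          runner:
            type: "constant"
            times: 4
            concurrency: 2
          context:
            users:
              tenants: 2
              users_per_tenant: 2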
@@ -2,7 +2,7 @@
     "CinderVolumes.create_nested_snapshots_and_attach_volume": [
         {
             "args": {
-                "volume_size": {
+                "size": {
                     "min": 1,
                     "max": 5
                 },
@@ -2,7 +2,7 @@
   CinderVolumes.create_nested_snapshots_and_attach_volume:
     -
       args:
-        volume_size:
+        size:
          min: 1
          max: 5
        nested_level:
@@ -21,6 +21,7 @@ from rally.benchmark.scenarios import base


 class FakePlugin(base.Scenario):
+    """Fake plugin with a scenario."""

     @base.atomic_action_timer("test1")
     def _test1(self, factor):
@@ -32,5 +33,9 @@ class FakePlugin(base.Scenario):

     @base.scenario()
     def testplugin(self, factor=1):
+        """Fake scenario.
+
+        :param factor: influences the argument value for a time.sleep() call
+        """
         self._test1(factor)
         self._test2(factor)
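Besides mere presence, the new test suite is said to check that docstrings are
"correctly formed". A format check in the spirit of this commit could be
sketched as follows (the exact rules enforced by Rally's suite are an
assumption here):

    def check_docstring_format(doc):
        # Illustrative rules implied by the docstring edits in this commit:
        # a one-line summary ending with a period, followed by a blank line
        # whenever a longer body or :param: fields are present.
        lines = doc.split("\n")
        assert lines[0].endswith("."), "summary line should end with a period"
        if len(lines) > 1:
            assert lines[1].strip() == "", "summary needs a trailing blank line"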
@@ -734,7 +734,7 @@
   CinderVolumes.create_and_attach_volume:
     -
       args:
-        volume_size: 1
+        size: 1
         image:
           name: "^cirros.*uec$"
         flavor:
@@ -755,7 +755,7 @@
     -
       args:
         volume_type: false
-        volume_size:
+        size:
          min: 1
          max: 2
      runner:
@@ -778,7 +778,7 @@
     -
       args:
         volume_type: true
-        volume_size:
+        size:
          min: 1
          max: 2
      runner:
@@ -803,7 +803,7 @@
   CinderVolumes.create_nested_snapshots_and_attach_volume:
     -
       args:
-        volume_size:
+        size:
          min: 1
          max: 2
        nested_level:
@@ -24,7 +24,7 @@ def setup_app(config)
     This is a generic interface method of an application.

     :param config: An instance of :class:`pecan.Config`.
-    :return: A normal WSGI application, an instance of
+    :returns: A normal WSGI application, an instance of
               :class:`pecan.Pecan`.
     """
     app = pecan.Pecan(config.app.root, debug=logging.is_debug())
@@ -32,6 +32,7 @@ def setup_app(config):


 def make_app():
+    """Load Pecan application."""
     config = {
         "app": {
             "root": "rally.aas.rest.controllers.root.RootController",
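Since setup_app()/make_app() return a plain WSGI application, the REST service
can be served by any WSGI server. A minimal sketch, assuming the factory lives
in rally.aas.rest.app (the module path is inferred from the changed-paths list
above and may differ):

    from wsgiref.simple_server import make_server

    from rally.aas.rest import app

    # make_app() is the zero-argument factory shown in the hunk above.
    httpd = make_server("127.0.0.1", 8080, app.make_app())
    httpd.serve_forever()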
@@ -17,14 +17,16 @@ from rally.benchmark import validation


 class Authenticate(base.Scenario):
-    """This class should contain authentication mechanism.
+    """Benchmark scenarios for the authentication mechanism.

-    For different types of clients like Keystone.
+    Benchmark scenarios for different types of OpenStack clients like Keystone,
+    Nova etc.
     """

     @validation.required_openstack(users=True)
     @base.scenario()
     def keystone(self):
+        """Check Keystone Client."""
         self.clients("keystone")

     @validation.number("repetitions", minval=1)
@ -19,17 +19,19 @@ from rally import consts
|
||||
|
||||
|
||||
class CeilometerAlarms(ceilometerutils.CeilometerScenario):
|
||||
"""Benchmark scenarios for Ceilometer Alarms API."""
|
||||
|
||||
@validation.required_services(consts.Service.CEILOMETER)
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario(context={"cleanup": ["ceilometer"]})
|
||||
def create_alarm(self, meter_name, threshold, **kwargs):
|
||||
"""Test creating an alarm.
|
||||
"""Create an alarm.
|
||||
|
||||
This scenarios test POST /v2/alarms.
|
||||
meter_name and threshold are required parameters for alarm creation.
|
||||
kwargs stores other optional parameters like 'ok_actions',
|
||||
'project_id' etc that may be passed while creating alarm.
|
||||
'project_id' etc that may be passed while creating an alarm.
|
||||
|
||||
:param meter_name: specifies meter name of the alarm
|
||||
:param threshold: specifies alarm threshold
|
||||
:param kwargs: specifies optional arguments for alarm creation.
|
||||
@ -40,7 +42,7 @@ class CeilometerAlarms(ceilometerutils.CeilometerScenario):
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario()
|
||||
def list_alarms(self):
|
||||
"""Test fetching all alarms.
|
||||
"""Fetch all alarms.
|
||||
|
||||
This scenario fetches list of all alarms using GET /v2/alarms.
|
||||
"""
|
||||
@ -50,13 +52,15 @@ class CeilometerAlarms(ceilometerutils.CeilometerScenario):
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario(context={"cleanup": ["ceilometer"]})
|
||||
def create_and_list_alarm(self, meter_name, threshold, **kwargs):
|
||||
"""Test creating and getting newly created alarm.
|
||||
"""Create and get the newly created alarm.
|
||||
|
||||
This scenarios test GET /v2/alarms/(alarm_id)
|
||||
Initially alarm is created and then the created alarm is fetched using
|
||||
its alarm_id. meter_name and threshold are required parameters
|
||||
for alarm creation. kwargs stores other optional parameters like
|
||||
'ok_actions', 'project_id' etc. that may be passed while creating alarm
|
||||
'ok_actions', 'project_id' etc. that may be passed while creating
|
||||
an alarm.
|
||||
|
||||
:param meter_name: specifies meter name of the alarm
|
||||
:param threshold: specifies alarm threshold
|
||||
:param kwargs: specifies optional arguments for alarm creation.
|
||||
@ -68,13 +72,14 @@ class CeilometerAlarms(ceilometerutils.CeilometerScenario):
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario(context={"cleanup": ["ceilometer"]})
|
||||
def create_and_update_alarm(self, meter_name, threshold, **kwargs):
|
||||
"""Test creating and updating the newly created alarm.
|
||||
"""Create and update the newly created alarm.
|
||||
|
||||
This scenarios test PUT /v2/alarms/(alarm_id)
|
||||
Initially alarm is created and then the created alarm is updated using
|
||||
its alarm_id. meter_name and threshold are required parameters
|
||||
for alarm creation. kwargs stores other optional parameters like
|
||||
'ok_actions', 'project_id' etc that may be passed while alarm creation.
|
||||
|
||||
:param meter_name: specifies meter name of the alarm
|
||||
:param threshold: specifies alarm threshold
|
||||
:param kwargs: specifies optional arguments for alarm creation.
|
||||
@ -87,13 +92,14 @@ class CeilometerAlarms(ceilometerutils.CeilometerScenario):
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario(context={"cleanup": ["ceilometer"]})
|
||||
def create_and_delete_alarm(self, meter_name, threshold, **kwargs):
|
||||
"""Test creating and deleting the newly created alarm.
|
||||
"""Create and delete the newly created alarm.
|
||||
|
||||
This scenarios test DELETE /v2/alarms/(alarm_id)
|
||||
Initially alarm is created and then the created alarm is deleted using
|
||||
its alarm_id. meter_name and threshold are required parameters
|
||||
for alarm creation. kwargs stores other optional parameters like
|
||||
'ok_actions', 'project_id' etc that may be passed while alarm creation.
|
||||
|
||||
:param meter_name: specifies meter name of the alarm
|
||||
:param threshold: specifies alarm threshold
|
||||
:param kwargs: specifies optional arguments for alarm creation.
|
||||
|
@ -19,10 +19,11 @@ from rally import consts
|
||||
|
||||
|
||||
class CeilometerMeters(ceilometerutils.CeilometerScenario):
|
||||
"""Benchmark scenarios for Ceilometer Meters API."""
|
||||
|
||||
@validation.required_services(consts.Service.CEILOMETER)
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario()
|
||||
def list_meters(self):
|
||||
"""Test fetching user's meters."""
|
||||
"""Fetch user's meters."""
|
||||
self._list_meters()
|
||||
|
@ -21,16 +21,18 @@ from rally import consts
|
||||
|
||||
|
||||
class CeilometerQueries(ceilometerutils.CeilometerScenario):
|
||||
"""Benchmark scenarios for Ceilometer Queries API."""
|
||||
|
||||
@validation.required_services(consts.Service.CEILOMETER)
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario(context={"cleanup": ["ceilometer"]})
|
||||
def create_and_query_alarms(self, meter_name, threshold, filter=None,
|
||||
orderby=None, limit=None, **kwargs):
|
||||
"""Creates an alarm and then queries it with specific parameters.
|
||||
"""Create an alarm and then query it with specific parameters.
|
||||
|
||||
This scenario tests POST /v2/query/alarms
|
||||
An alarm is first created and then fetched using the input query
|
||||
An alarm is first created and then fetched using the input query.
|
||||
|
||||
:param meter_name: specifies meter name of alarm
|
||||
:param threshold: specifies alarm threshold
|
||||
:param filter: optional filter query dictionary
|
||||
@ -49,11 +51,12 @@ class CeilometerQueries(ceilometerutils.CeilometerScenario):
|
||||
@base.scenario(context={"cleanup": ["ceilometer"]})
|
||||
def create_and_query_alarm_history(self, meter_name, threshold,
|
||||
orderby=None, limit=None, **kwargs):
|
||||
"""Creates an alarm and then queries for its history
|
||||
"""Create an alarm and then query for its history.
|
||||
|
||||
This scenario tests POST /v2/query/alarms/history
|
||||
An alarm is first created and then its alarm_id is used to fetch the
|
||||
history of that specific alarm
|
||||
history of that specific alarm.
|
||||
|
||||
:param meter_name: specifies meter name of alarm
|
||||
:param threshold: specifies alarm threshold
|
||||
:param orderby: optional param for specifying ordering of results
|
||||
@ -71,10 +74,11 @@ class CeilometerQueries(ceilometerutils.CeilometerScenario):
|
||||
counter_unit, counter_volume, resource_id,
|
||||
filter=None, orderby=None, limit=None,
|
||||
**kwargs):
|
||||
"""Creates a sample and then queries it with specific parameters
|
||||
"""Create a sample and then query it with specific parameters.
|
||||
|
||||
This scenario tests POST /v2/query/samples
|
||||
A sample is first created and then fetched using the input query
|
||||
A sample is first created and then fetched using the input query.
|
||||
|
||||
:param counter_name: specifies name of the counter
|
||||
:param counter_type: specifies type of the counter
|
||||
:param counter_unit: specifies name of the counter
|
||||
|
@ -19,12 +19,13 @@ from rally import consts
|
||||
|
||||
|
||||
class CeilometerResource(ceilometerutils.CeilometerScenario):
|
||||
"""Benchmark scenarios for Ceilometer Resource API."""
|
||||
|
||||
@validation.required_services(consts.Service.CEILOMETER)
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario()
|
||||
def list_resources(self):
|
||||
"""Test fetching all resources.
|
||||
"""Fetch all resources.
|
||||
|
||||
This scenario fetches list of all resources using GET /v2/resources.
|
||||
"""
|
||||
|
@ -19,16 +19,17 @@ from rally import consts
|
||||
|
||||
|
||||
class CeilometerStats(utils.CeilometerScenario):
|
||||
"""Benchmark scenarios for Ceilometer Stats API."""
|
||||
|
||||
@validation.required_services(consts.Service.CEILOMETER)
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario(context={"cleanup": ["ceilometer"]})
|
||||
def create_meter_and_get_stats(self, **kwargs):
|
||||
"""Test creating a meter and fetching its statistics.
|
||||
"""Create a meter and fetch its statistics.
|
||||
|
||||
Meter is first created and then statistics is fetched for the same
|
||||
using GET /v2/meters/(meter_name)/statistics.
|
||||
:param name_length: length of generated (random) part of meter name
|
||||
|
||||
:param kwargs: contains optional arguments to create a meter
|
||||
"""
|
||||
meter = self._create_meter(**kwargs)
|
||||
|
@ -16,10 +16,12 @@ from rally.benchmark.scenarios import base
|
||||
|
||||
|
||||
class CeilometerScenario(base.Scenario):
|
||||
"""Base class for Ceilometer scenarios with basic atomic actions."""
|
||||
|
||||
RESOURCE_NAME_PREFIX = "rally_ceilometer_"
|
||||
|
||||
def _get_alarm_dict(self, **kwargs):
|
||||
"""Prepares and returns alarm dictionary for creating an alarm.
|
||||
"""Prepare and return an alarm dict for creating an alarm.
|
||||
|
||||
:param kwargs: optional parameters to create alarm
|
||||
:returns: alarm dictionary used to create an alarm
|
||||
@ -38,6 +40,7 @@ class CeilometerScenario(base.Scenario):
|
||||
|
||||
List alarm matching alarm_id. It fetches all alarms
|
||||
if alarm_id is None.
|
||||
|
||||
:param alarm_id: specifies id of the alarm
|
||||
:returns: list of alarms
|
||||
"""
|
||||
@ -63,7 +66,7 @@ class CeilometerScenario(base.Scenario):
|
||||
|
||||
@base.atomic_action_timer('ceilometer.delete_alarm')
|
||||
def _delete_alarm(self, alarm_id):
|
||||
"""Deletes an alarm.
|
||||
"""Delete an alarm.
|
||||
|
||||
:param alarm_id: specifies id of the alarm
|
||||
"""
|
||||
@ -71,7 +74,7 @@ class CeilometerScenario(base.Scenario):
|
||||
|
||||
@base.atomic_action_timer('ceilometer.update_alarm')
|
||||
def _update_alarm(self, alarm_id, alarm_dict_delta):
|
||||
"""Updates an alarm.
|
||||
"""Update an alarm.
|
||||
|
||||
:param alarm_id: specifies id of the alarm
|
||||
:param alarm_dict_delta: features of alarm to be updated
|
||||
@ -116,7 +119,9 @@ class CeilometerScenario(base.Scenario):
|
||||
def _query_alarms(self, filter, orderby, limit):
|
||||
"""Query alarms with specific parameters.
|
||||
|
||||
If no input params are provided, it returns all the results in database
|
||||
If no input params are provided, it returns all the results
|
||||
in the database.
|
||||
|
||||
:param limit: optional param for maximum number of results returned
|
||||
:param orderby: optional param for specifying ordering of results
|
||||
:param filter: optional filter query
|
||||
@ -129,7 +134,9 @@ class CeilometerScenario(base.Scenario):
|
||||
def _query_alarm_history(self, filter, orderby, limit):
|
||||
"""Query history of an alarm.
|
||||
|
||||
If no input params are provided, it returns all the results in database
|
||||
If no input params are provided, it returns all the results
|
||||
in the database.
|
||||
|
||||
:param limit: optional param for maximum number of results returned
|
||||
:param orderby: optional param for specifying ordering of results
|
||||
:param filter: optional filter query
|
||||
@ -141,7 +148,7 @@ class CeilometerScenario(base.Scenario):
|
||||
@base.atomic_action_timer('ceilometer.create_sample')
|
||||
def _create_sample(self, counter_name, counter_type, counter_unit,
|
||||
counter_volume, resource_id, **kwargs):
|
||||
"""Creates a Sample with specified parameters.
|
||||
"""Create a Sample with specified parameters.
|
||||
|
||||
:param counter_name: specifies name of the counter
|
||||
:param counter_type: specifies type of the counter
|
||||
@ -162,7 +169,9 @@ class CeilometerScenario(base.Scenario):
|
||||
def _query_samples(self, filter, orderby, limit):
|
||||
"""Query samples with specified parameters.
|
||||
|
||||
If no input params are provided, it returns all the results in database
|
||||
If no input params are provided, it returns all the results
|
||||
in the database.
|
||||
|
||||
:param limit: optional param for maximum number of results returned
|
||||
:param orderby: optional param for specifying ordering of results
|
||||
:param filter: optional filter query
|
||||
|
@ -49,6 +49,7 @@ CONF.register_opts(cinder_benchmark_opts, group=benchmark_group)
|
||||
|
||||
|
||||
class CinderScenario(base.Scenario):
|
||||
"""Base class for Cinder scenarios with basic atomic actions."""
|
||||
|
||||
RESOURCE_NAME_PREFIX = "rally_volume_"
|
||||
|
||||
@ -60,14 +61,13 @@ class CinderScenario(base.Scenario):
|
||||
|
||||
@base.atomic_action_timer('cinder.create_volume')
|
||||
def _create_volume(self, size, **kwargs):
|
||||
"""create one volume.
|
||||
"""Create one volume.
|
||||
|
||||
Returns when the volume is actually created and is in the "Available"
|
||||
state.
|
||||
|
||||
:param size: int be size of volume in GB
|
||||
:param **kwargs: Other optional parameters to initialize the volume
|
||||
|
||||
:param kwargs: Other optional parameters to initialize the volume
|
||||
:returns: Created volume object
|
||||
"""
|
||||
kwargs["display_name"] = kwargs.get("display_name",
|
||||
@ -111,8 +111,7 @@ class CinderScenario(base.Scenario):
|
||||
:param volume_id: volume uuid for creating snapshot
|
||||
:param force: flag to indicate whether to snapshot a volume even if
|
||||
it's attached to an instance
|
||||
:param **kwargs: Other optional parameters to initialize the volume
|
||||
|
||||
:param kwargs: Other optional parameters to initialize the volume
|
||||
:returns: Created snapshot object
|
||||
"""
|
||||
kwargs["display_name"] = kwargs.get("display_name",
|
||||
|
@@ -28,23 +28,27 @@ LOG = logging.getLogger(__name__)

 class CinderVolumes(utils.CinderScenario,
                     nova_utils.NovaScenario):
+    """Benchmark scenarios for Cinder Volumes."""

     @validation.required_services(consts.Service.CINDER)
     @validation.required_openstack(users=True)
     @base.scenario(context={"cleanup": ["cinder"]})
     def create_and_list_volume(self, size, detailed=True, **kwargs):
-        """Tests creating a volume and listing volumes.
+        """Create a volume and list all volumes.

-        This scenario is a very useful tool to measure
-        the "cinder volume-list" command performance.
+        Measure the "cinder volume-list" command performance.

         If you have only 1 user in your context, you will
         add 1 volume on every iteration. So you will have more
         and more volumes and will be able to measure the
         performance of the "cinder volume-list" command depending on
         the number of images owned by users.

+        :param size: volume size (in GB)
+        :param detailed: determines whether the volume listing should contain
+                         detailed information about all of them
+        :param kwargs: optional args to create a volume
         """
         self._create_volume(size, **kwargs)
         self._list_volumes(detailed)

@@ -53,11 +57,20 @@ class CinderVolumes(utils.CinderScenario,
     @base.scenario(context={"cleanup": ["cinder"]})
     def create_and_delete_volume(self, size, min_sleep=0, max_sleep=0,
                                  **kwargs):
-        """Tests creating and then deleting a volume.
+        """Create and then delete a volume.

-        Good for testing a maximal bandwidth of cloud.
+        Good for testing a maximal bandwidth of cloud. Optional 'min_sleep'
+        and 'max_sleep' parameters allow the scenario to simulate a pause
+        between volume creation and deletion (of random duration from
+        [min_sleep, max_sleep]).

+        :param size: volume size (in GB)
+        :param min_sleep: minimum sleep time between volume creation and
+                          deletion (in seconds)
+        :param max_sleep: maximum sleep time between volume creation and
+                          deletion (in seconds)
+        :param kwargs: optional args to create a volume
         """
         volume = self._create_volume(size, **kwargs)
         self.sleep_between(min_sleep, max_sleep)
         self._delete_volume(volume)

@@ -66,10 +79,13 @@ class CinderVolumes(utils.CinderScenario,
     @validation.required_openstack(users=True)
     @base.scenario(context={"cleanup": ["cinder"]})
     def create_volume(self, size, **kwargs):
-        """Test creating volumes perfromance.
+        """Create a volume.

         Good test to check how influence amount of active volumes on
         performance of creating new.

+        :param size: volume size (in GB)
+        :param kwargs: optional args to create a volume
         """
         self._create_volume(size, **kwargs)

@@ -79,9 +95,21 @@ class CinderVolumes(utils.CinderScenario,
     @base.scenario(context={"cleanup": ["cinder"]})
     def create_and_delete_snapshot(self, force=False, min_sleep=0,
                                    max_sleep=0, **kwargs):
-        """Tests creating and then deleting a volume-snapshot."""
-        volume_id = self.context["tenant"]["volume"]
+        """Create and then delete a volume-snapshot.
+
+        Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
+        to simulate a pause between snapshot creation and deletion
+        (of random duration from [min_sleep, max_sleep]).
+
+        :param force: when set to True, allows snapshot of a volume when
+                      the volume is attached to an instance
+        :param min_sleep: minimum sleep time between snapshot creation and
+                          deletion (in seconds)
+        :param max_sleep: maximum sleep time between snapshot creation and
+                          deletion (in seconds)
+        :param kwargs: optional args to create a shapshot
+        """
+        volume_id = self.context["tenant"]["volume"]
         snapshot = self._create_snapshot(volume_id, force=force, **kwargs)
         self.sleep_between(min_sleep, max_sleep)
         self._delete_snapshot(snapshot)
@@ -92,23 +120,26 @@ class CinderVolumes(utils.CinderScenario,
     @validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
     @validation.required_openstack(users=True)
     @base.scenario(context={"cleanup": ["cinder", "nova"]})
-    def create_and_attach_volume(self, volume_size, image, flavor,
-                                 min_sleep=0, max_sleep=0, **kwargs):
-        """Tests creating a VM and attaching a volume.
-
-        Simple test to create a vm and attach a volume, then
-        detach the volume and cleanup.
-
-        :param volume_size: The size of the volume to create
-        :param image: The glance image name to use for the vm
-        :param flavor: the VM flavor name
+    def create_and_attach_volume(self, size, image, flavor, **kwargs):
+        """Create a VM and attach a volume to it.
+
+        Simple test to create a VM and attach a volume, then
+        detach the volume and delete volume/VM.
+
+        :param size: volume size (in GB)
+        :param image: Glance image name to use for the VM
+        :param flavor: VM flavor name
+        :param kwargs: optional arguments for VM/volume creation
         """
+        if "volume_size" in kwargs:
+            import warnings
+            warnings.warn("'volume_size' argument is deprecated. You should "
+                          "use 'size' instead.")
+            size = kwargs["volume_size"]
+
         server = self._boot_server(
             self._generate_random_name(), image, flavor, **kwargs)
-        volume = self._create_volume(volume_size, **kwargs)
+        volume = self._create_volume(size, **kwargs)

         self._attach_volume(server, volume)
         self._detach_volume(server, volume)
@@ -121,30 +152,35 @@ class CinderVolumes(utils.CinderScenario,
     @validation.required_openstack(users=True)
     @base.scenario(context={"cleanup": ["cinder", "nova"]})
     def create_snapshot_and_attach_volume(self, volume_type=False,
-                                          volume_size=None, **kwargs):
+                                          size=None, **kwargs):

-        """Tests volume create, snapshot create and volume attach/detach
+        """Create volume, snapshot and attach/detach volume.

         This scenario is based off of the standalone qaStressTest.py
         (https://github.com/WaltHP/cinder-stress).

         :param volume_type: Whether or not to specify volume type when creating
                             volumes.
-        :param volume_size: Volume size - dictionary, contains two values
-                            min - minimum size volumes will be created as.
-                            max - maximum size volumes will be created as.
-                            default values: {"min": 1, "max": 5}
+        :param size: Volume size - dictionary, contains two values:
+                     min - minimum size volumes will be created as;
+                     max - maximum size volumes will be created as.
+                     default values: {"min": 1, "max": 5}
         :param kwargs: Optional parameters used during volume
                        snapshot creation.
         """
-        if "min_volume_size" in kwargs or "max_volume_size" in kwargs:
+        if "min_size" in kwargs or "max_size" in kwargs:
             import warnings
-            warnings.warn("'min_volume_size' and 'max_volume_size' arguments "
-                          "are deprecated. You should use 'volume_size', with "
+            warnings.warn("'min_size' and 'max_size' arguments "
+                          "are deprecated. You should use 'size', with "
                           "keys 'min' and 'max' instead.")
-        if volume_size is None:
-            volume_size = {"min": 1, "max": 5}
+        if "volume_size" in kwargs:
+            import warnings
+            warnings.warn("'volume_size' argument is deprecated. You should "
+                          "use 'size' instead.")
+            size = kwargs["volume_size"]
+
+        if size is None:
+            size = {"min": 1, "max": 5}
         selected_type = None
         volume_types = [None]

@@ -154,9 +190,9 @@ class CinderVolumes(utils.CinderScenario,
             volume_types.append(s.name)
         selected_type = random.choice(volume_types)

-        volume_size = random.randint(volume_size['min'], volume_size['max'])
+        size = random.randint(size['min'], size['max'])

-        volume = self._create_volume(volume_size, volume_type=selected_type)
+        volume = self._create_volume(size, volume_type=selected_type)
         snapshot = self._create_snapshot(volume.id, False, **kwargs)

         server = self.get_random_server()
@@ -171,47 +207,53 @@ class CinderVolumes(utils.CinderScenario,
     @validation.required_openstack(users=True)
     @base.scenario(context={"cleanup": ["cinder", "nova"]})
     def create_nested_snapshots_and_attach_volume(self,
-                                                  volume_size=None,
+                                                  size=None,
                                                   nested_level=None,
                                                   **kwargs):

-        """Tests volume create from snapshot and volume attach/detach
+        """Create a volume from snapshot and attach/detach the volume

         This scenario create volume, create it's snapshot, attach volume,
         then create new volume from existing snapshot and so on,
-        with defined nested level, after all detach and delete them .
+        with defined nested level, after all detach and delete them.
         volume->snapshot->volume->snapshot->volume ...

-        :param volume_size: Volume size - dictionary, contains two values
-                            min - minimum size volumes will be created as.
-                            max - maximum size volumes will be created as.
-                            default values: {"min": 1, "max": 5}
-        :param nested_level: Nested level - dictionary, contains two values
-                             min - minimum number of volumes will be create from snapshot
-                             max - maximum number of volumes will be create from snapshot
-                             default values: {"min": 5, "max": 10}
+        :param size: Volume size - dictionary, contains two values:
+                     min - minimum size volumes will be created as;
+                     max - maximum size volumes will be created as.
+                     default values: {"min": 1, "max": 5}
+        :param nested_level: Nested level - dictionary, contains two values:
+                             min - minimum number of volumes will be created
+                                   from snapshot;
+                             max - maximum number of volumes will be created
+                                   from snapshot.
+                             default values: {"min": 5, "max": 10}
         :param kwargs: Optional parameters used during volume
                        snapshot creation.
         """
-        if volume_size is None:
-            volume_size = {"min": 1, "max": 5}
+        if "volume_size" in kwargs:
+            import warnings
+            warnings.warn("'volume_size' argument is deprecated. You should "
+                          "use 'size' instead.")
+            size = kwargs["volume_size"]
+
+        if size is None:
+            size = {"min": 1, "max": 5}
         if nested_level is None:
             nested_level = {"min": 5, "max": 10}

-        volume_size = random.randint(volume_size['min'], volume_size['max'])
+        size = random.randint(size['min'], size['max'])
         nested_level = random.randint(nested_level['min'], nested_level['max'])

         servers = [self.get_random_server()]
-        volumes = [self._create_volume(volume_size)]
+        volumes = [self._create_volume(size)]
         snapshots = [self._create_snapshot(volumes[0].id, False, **kwargs)]

         self._attach_volume(servers[0], volumes[0])

         snapshot = snapshots[0]
         for i in range(nested_level - 1):
-            volume = self._create_volume(volume_size, snapshot_id=snapshot.id)
+            volume = self._create_volume(size, snapshot_id=snapshot.id)
             snapshot = self._create_snapshot(volume.id, False, **kwargs)
             server = self.get_random_server()
@ -21,15 +21,15 @@ from rally import consts
|
||||
|
||||
|
||||
class DesignateBasic(utils.DesignateScenario):
|
||||
"""Basic benchmark scenarios for Designate."""
|
||||
|
||||
@validation.required_services(consts.Service.DESIGNATE)
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario(context={"cleanup": ["designate"]})
|
||||
def create_and_list_domains(self):
|
||||
"""Tests creating a domain and listing domains.
|
||||
"""Create a domain and list all domains.
|
||||
|
||||
This scenario is a very useful tool to measure
|
||||
the "designate domain-list" command performance.
|
||||
Measure the "designate domain-list" command performance.
|
||||
|
||||
If you have only 1 user in your context, you will
|
||||
add 1 domain on every iteration. So you will have more
|
||||
@ -44,7 +44,7 @@ class DesignateBasic(utils.DesignateScenario):
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario(context={"cleanup": ["designate"]})
|
||||
def list_domains(self):
|
||||
"""Test the designate domain-list command.
|
||||
"""List Designate domains.
|
||||
|
||||
This simple scenario tests the designate domain-list command by listing
|
||||
all the domains.
|
||||
@ -60,10 +60,10 @@ class DesignateBasic(utils.DesignateScenario):
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario(context={"cleanup": ["designate"]})
|
||||
def create_and_delete_domain(self):
|
||||
"""Test adds and then deletes domain.
|
||||
"""Add and then delete a domain.
|
||||
|
||||
This is very useful to measure perfromance of creating and deleting
|
||||
domains with different level of load.
|
||||
Measure the performance of creating and deleting domains
|
||||
with different level of load.
|
||||
"""
|
||||
domain = self._create_domain()
|
||||
self._delete_domain(domain['id'])
|
||||
@ -72,10 +72,10 @@ class DesignateBasic(utils.DesignateScenario):
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario(context={"cleanup": ["designate"]})
|
||||
def create_and_delete_records(self, records_per_domain=5):
|
||||
"""Test adds and then deletes records.
|
||||
"""Add and then delete records.
|
||||
|
||||
This is very useful to measure perfromance of creating and deleting
|
||||
records with different level of load.
|
||||
Measure the performance of creating and deleting records
|
||||
with different level of load.
|
||||
|
||||
:param records_per_domain: Records to create pr domain.
|
||||
"""
|
||||
@ -99,7 +99,7 @@ class DesignateBasic(utils.DesignateScenario):
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario(context={"cleanup": ["designate"]})
|
||||
def list_records(self, domain_id):
|
||||
"""Test the designate record-list command.
|
||||
"""List Designate records.
|
||||
|
||||
This simple scenario tests the designate record-list command by listing
|
||||
all the records in a domain.
|
||||
@ -117,7 +117,7 @@ class DesignateBasic(utils.DesignateScenario):
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario(context={"cleanup": ["designate"]})
|
||||
def create_and_list_records(self, records_per_domain=5):
|
||||
"""Test adds and then lists records.
|
||||
"""Add and then list records.
|
||||
|
||||
If you have only 1 user in your context, you will
|
||||
add 1 record on every iteration. So you will have more
|
||||
|
@ -18,7 +18,7 @@ from rally.benchmark.scenarios import base
|
||||
|
||||
|
||||
class DesignateScenario(base.Scenario):
|
||||
"""This class should contain base operations for benchmarking designate."""
|
||||
"""Base class for Designate scenarios with basic atomic actions."""
|
||||
|
||||
RESOURCE_NAME_PREFIX = "rally_"
|
||||
|
||||
@ -44,16 +44,18 @@ class DesignateScenario(base.Scenario):
|
||||
def _delete_domain(self, domain_id):
|
||||
"""Delete designate zone.
|
||||
|
||||
:param domain: Domain object
|
||||
:param domain_id: domain ID
|
||||
"""
|
||||
self.clients("designate").domains.delete(domain_id)
|
||||
|
||||
def _create_record(self, domain, record=None, atomic_action=True):
|
||||
"""Create a record in a domain.
|
||||
|
||||
:param domain: Domain object
|
||||
:param record: Record object
|
||||
:returns: designate record dict
|
||||
:param domain: domain dict
|
||||
:param record: record dict
|
||||
:param atomic_action: True if the record creation should be tracked
|
||||
as an atomic action
|
||||
:returns: Designate record dict
|
||||
"""
|
||||
record = record or {}
|
||||
record.setdefault('type', 'A')
|
||||
@ -71,18 +73,20 @@ class DesignateScenario(base.Scenario):
|
||||
|
||||
@base.atomic_action_timer('designate.list_records')
|
||||
def _list_records(self, domain_id):
|
||||
"""List records in a domain..
|
||||
"""List domain records.
|
||||
|
||||
:param domain_id: Domain ID
|
||||
:returns: domain record list
|
||||
:param domain_id: domain ID
|
||||
:returns: domain records list
|
||||
"""
|
||||
return self.clients("designate").records.list(domain_id)
|
||||
|
||||
def _delete_record(self, domain_id, record_id, atomic_action=True):
|
||||
"""Delete a record in a domain..
|
||||
"""Delete a domain record.
|
||||
|
||||
:param domain_id: Domain ID
|
||||
:param record_id: Record ID
|
||||
:param domain_id: domain ID
|
||||
:param record_id: record ID
|
||||
:param atomic_action: True if the record creation should be tracked
|
||||
as an atomic action
|
||||
"""
|
||||
client = self.clients('designate')
|
||||
|
||||
|
@ -23,17 +23,17 @@ class DummyScenarioException(exceptions.RallyException):
|
||||
|
||||
|
||||
class Dummy(base.Scenario):
|
||||
"""Benchmarks for testing Rally benchmark engine at scale."""
|
||||
"""Dummy benchmarks for testing Rally benchmark engine at scale."""
|
||||
|
||||
@base.scenario()
|
||||
def dummy(self, sleep=0):
|
||||
"""Test the performance of ScenarioRunners.
|
||||
"""Do nothing and sleep for the given number of seconds (0 by default).
|
||||
|
||||
Dummy.dummy can be used for testing performance of different
|
||||
ScenarioRunners and ability of rally to store a large
|
||||
ScenarioRunners and of the ability of rally to store a large
|
||||
amount of results.
|
||||
|
||||
:param sleep: Idle time of method.
|
||||
:param sleep: idle time of method (in seconds).
|
||||
"""
|
||||
if sleep:
|
||||
time.sleep(sleep)
|
||||
@ -42,13 +42,14 @@ class Dummy(base.Scenario):
|
||||
minval=1, integer_only=True, nullable=True)
|
||||
@base.scenario()
|
||||
def dummy_exception(self, size_of_message=1):
|
||||
"""Test if exceptions are processed properly.
|
||||
"""Throw an exception.
|
||||
|
||||
Dummy.dummy_exception can be used for test if Exceptions are processed
|
||||
Dummy.dummy_exception can be used for test if exceptions are processed
|
||||
properly by ScenarioRunners and benchmark and analyze rally
|
||||
results storing process.
|
||||
|
||||
:param size_of_message: the size of the message.
|
||||
:param size_of_message: int size of the exception message
|
||||
:raises: DummyScenarioException
|
||||
"""
|
||||
|
||||
raise DummyScenarioException("M" * size_of_message)
|
||||
@ -57,9 +58,11 @@ class Dummy(base.Scenario):
|
||||
minval=0, maxval=1, integer_only=False, nullable=True)
|
||||
@base.scenario()
|
||||
def dummy_exception_probability(self, exception_probability=0.5):
|
||||
"""Test if exceptions are processed properly.
|
||||
"""Throw an exception with given probability.
|
||||
|
||||
This scenario will throw an exception sometimes.
|
||||
Dummy.dummy_exception_probability can be used to test if exceptions
|
||||
are processed properly by ScenarioRunners. This scenario will throw
|
||||
an exception sometimes, depending on the given exception probability.
|
||||
|
||||
:param exception_probability: Sets how likely it is that an exception
|
||||
will be thrown. Float between 0 and 1
|
||||
@ -74,6 +77,11 @@ class Dummy(base.Scenario):
|
||||
|
||||
@base.scenario()
|
||||
def dummy_with_scenario_output(self):
|
||||
"""Return a dummy scenario output.
|
||||
|
||||
Dummy.dummy_with_scenario_output can be used to test the scenario
|
||||
output processing.
|
||||
"""
|
||||
out = {
|
||||
'value_1': random.randint(1, 100),
|
||||
'value_2': random.random()
|
||||
@ -83,10 +91,22 @@ class Dummy(base.Scenario):
|
||||
|
||||
@base.atomic_action_timer("dummy_fail_test")
|
||||
def _random_fail_emitter(self, exception_probability):
|
||||
"""Throw an exception with given probability.
|
||||
|
||||
:raises: KeyError
|
||||
"""
|
||||
if random.random() < exception_probability:
|
||||
raise KeyError("Dummy test exception")
|
||||
|
||||
@base.scenario()
|
||||
def dummy_random_fail_in_atomic(self, exception_probability=0.5):
|
||||
"""Randomly throw exceptions in atomic actions.
|
||||
|
||||
Dummy.dummy_random_fail_in_atomic can be used to test atomic actions
|
||||
failures processing.
|
||||
|
||||
:param exception_probability: Probability with which atomic actions
|
||||
fail in this dummy scenario (0 <= p <= 1)
|
||||
"""
|
||||
self._random_fail_emitter(exception_probability)
|
||||
self._random_fail_emitter(exception_probability)
|
||||
|
@ -22,6 +22,7 @@ from rally import consts
|
||||
|
||||
|
||||
class GlanceImages(utils.GlanceScenario, nova_utils.NovaScenario):
|
||||
"""Benchmark scenarios for Glance images."""
|
||||
|
||||
RESOURCE_NAME_PREFIX = "rally_image_"
|
||||
RESOURCE_NAME_LENGTH = 16
|
||||
@ -31,16 +32,22 @@ class GlanceImages(utils.GlanceScenario, nova_utils.NovaScenario):
|
||||
@base.scenario(context={"cleanup": ["glance"]})
|
||||
def create_and_list_image(self, container_format,
|
||||
image_location, disk_format, **kwargs):
|
||||
"""Test adding an image and then listing all images.
|
||||
"""Add an image and then list all images.
|
||||
|
||||
This scenario is a very useful tool to measure
|
||||
the "glance image-list" command performance.
|
||||
Measure the "glance image-list" command performance.
|
||||
|
||||
If you have only 1 user in your context, you will
|
||||
add 1 image on every iteration. So you will have more
|
||||
and more images and will be able to measure the
|
||||
performance of the "glance image-list" command depending on
|
||||
the number of images owned by users.
|
||||
|
||||
:param container_format: container format of image. Acceptable
|
||||
formats: ami, ari, aki, bare, and ovf
|
||||
:param image_location: image file location
|
||||
:param disk_format: disk format of image. Acceptable formats:
|
||||
ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso
|
||||
:param kwargs: optional parameters to create image
|
||||
"""
|
||||
self._create_image(self._generate_random_name(),
|
||||
container_format,
|
||||
@ -53,7 +60,7 @@ class GlanceImages(utils.GlanceScenario, nova_utils.NovaScenario):
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario(context={"cleanup": ["glance"]})
|
||||
def list_images(self):
|
||||
"""Test the glance image-list command.
|
||||
"""List all images.
|
||||
|
||||
This simple scenario tests the glance image-list command by listing
|
||||
all the images.
|
||||
@ -70,7 +77,15 @@ class GlanceImages(utils.GlanceScenario, nova_utils.NovaScenario):
|
||||
@base.scenario(context={"cleanup": ["glance"]})
|
||||
def create_and_delete_image(self, container_format,
|
||||
image_location, disk_format, **kwargs):
|
||||
"""Test adds and then deletes image."""
|
||||
"""Add and then delete an image.
|
||||
|
||||
:param container_format: container format of image. Acceptable
|
||||
formats: ami, ari, aki, bare, and ovf
|
||||
:param image_location: image file location
|
||||
:param disk_format: disk format of image. Acceptable formats:
|
||||
ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso
|
||||
:param kwargs: optional parameters to create image
|
||||
"""
|
||||
image_name = self._generate_random_name()
|
||||
image = self._create_image(image_name,
|
||||
container_format,
|
||||
@ -88,7 +103,17 @@ class GlanceImages(utils.GlanceScenario, nova_utils.NovaScenario):
|
||||
image_location, disk_format,
|
||||
flavor, number_instances,
|
||||
**kwargs):
|
||||
"""Test adds image, boots instance from it and then deletes them."""
|
||||
"""Add an image and boot several instances from it.
|
||||
|
||||
:param container_format: container format of image. Acceptable
|
||||
formats: ami, ari, aki, bare, and ovf
|
||||
:param image_location: image file location
|
||||
:param disk_format: disk format of image. Acceptable formats:
|
||||
ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso
|
||||
:param flavor: Nova flavor to be used to launch an instance
|
||||
:param number_instances: number of Nova servers to boot
|
||||
:param kwargs: optional parameters to create image / server
|
||||
"""
|
||||
image_name = self._generate_random_name()
|
||||
image = self._create_image(image_name,
|
||||
container_format,
|
||||
|
@ -50,11 +50,11 @@ CONF.register_opts(glance_benchmark_opts, group=benchmark_group)
|
||||
|
||||
|
||||
class GlanceScenario(base.Scenario):
|
||||
"""Base class for Glance scenarios with basic atomic actions."""
|
||||
|
||||
@base.atomic_action_timer('glance.list_images')
|
||||
def _list_images(self):
|
||||
"""Returns user images list."""
|
||||
|
||||
return list(self.clients("glance").images.list())
|
||||
|
||||
@base.atomic_action_timer('glance.create_image')
|
||||
@ -62,17 +62,16 @@ class GlanceScenario(base.Scenario):
|
||||
image_location, disk_format, **kwargs):
|
||||
"""Create a new image.
|
||||
|
||||
:param image_name: String used to name the image
|
||||
:param container_format: Container format of image.
|
||||
Acceptable formats: ami, ari, aki, bare, and ovf.
|
||||
:param image_location: image file location used to upload
|
||||
:param disk_format: Disk format of image. Acceptable formats:
|
||||
ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso.
|
||||
:param **kwargs: optional parameters to create image
|
||||
:param image_name: string used to name the image
|
||||
:param container_format: container format of image. Acceptable
|
||||
formats: ami, ari, aki, bare, and ovf
|
||||
:param image_location: image file location
|
||||
:param disk_format: disk format of image. Acceptable formats:
|
||||
ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso
|
||||
:param kwargs: optional parameters to create image
|
||||
|
||||
returns: object of image
|
||||
:returns: image object
|
||||
"""
|
||||
|
||||
kw = {
|
||||
"name": image_name,
|
||||
"container_format": container_format,
|
||||
@ -107,7 +106,7 @@ class GlanceScenario(base.Scenario):
|
||||
|
||||
@base.atomic_action_timer('glance.delete_image')
|
||||
def _delete_image(self, image):
|
||||
"""Deletes the given image.
|
||||
"""Deletes given image.
|
||||
|
||||
Returns when the image is actually deleted.
|
||||
|
||||
|
@ -20,6 +20,7 @@ from rally import consts
|
||||
|
||||
|
||||
class HeatStacks(utils.HeatScenario):
|
||||
"""Benchmark scenarios for Heat stacks."""
|
||||
|
||||
RESOURCE_NAME_PREFIX = "rally_stack_"
|
||||
RESOURCE_NAME_LENGTH = 7
|
||||
@ -39,13 +40,13 @@ class HeatStacks(utils.HeatScenario):
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario(context={"cleanup": ["heat"]})
|
||||
def create_and_list_stack(self, template_path=None):
|
||||
"""Test adding an stack and then listing all stacks.
|
||||
"""Add a stack and then list all stacks.
|
||||
|
||||
This scenario is a very useful tool to measure
|
||||
the "heat stack-create" and "heat stack-list" command performance.
|
||||
Mesure the "heat stack-create" and "heat stack-list" commands
|
||||
performance.
|
||||
|
||||
:param template_path: path to template file. if it's None or incorrect,
|
||||
will be used default empty template.
|
||||
:param template_path: path to template file. If None or incorrect,
|
||||
then default empty template will be used.
|
||||
"""
|
||||
|
||||
stack_name = self._generate_random_name()
|
||||
@ -58,13 +59,13 @@ class HeatStacks(utils.HeatScenario):
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario(context={"cleanup": ["heat"]})
|
||||
def create_and_delete_stack(self, template_path=None):
|
||||
"""Test adds and then deletes stack.
|
||||
"""Add and then delete a stack.
|
||||
|
||||
This scenario is a very useful tool to measure
|
||||
the "heat stack-create" and "heat stack-delete" command performance.
|
||||
Measure the "heat stack-create" and "heat stack-delete" commands
|
||||
performance.
|
||||
|
||||
:param template_path: path to template file. if it's None or incorrect,
|
||||
will be used default empty template.
|
||||
:param template_path: path to template file. If None or incorrect,
|
||||
then default empty template will be used.
|
||||
"""
|
||||
stack_name = self._generate_random_name()
|
||||
template = self._get_template_from_file(template_path)
|
||||
|
@ -55,6 +55,7 @@ def heat_resource_is(status):
|
||||
|
||||
|
||||
class HeatScenario(base.Scenario):
|
||||
"""Base class for Heat scenarios with basic atomic actions."""
|
||||
|
||||
default_template = "HeatTemplateFormatVersion: '2012-12-12'"
|
||||
|
||||
@ -71,7 +72,7 @@ class HeatScenario(base.Scenario):
|
||||
:param stack_name: string. Name for created stack.
|
||||
:param template: optional parameter. Template with stack description.
|
||||
|
||||
returns: object of stack
|
||||
:returns: object of stack
|
||||
"""
|
||||
template = template or self.default_template
|
||||
|
||||
@ -102,7 +103,7 @@ class HeatScenario(base.Scenario):
|
||||
|
||||
@base.atomic_action_timer('heat.delete_stack')
|
||||
def _delete_stack(self, stack):
|
||||
"""Delete the given stack.
|
||||
"""Delete given stack.
|
||||
|
||||
Returns when the stack is actually deleted.
|
||||
|
||||
|
@ -19,17 +19,30 @@ from rally.benchmark import validation
|
||||
|
||||
|
||||
class KeystoneBasic(kutils.KeystoneScenario):
|
||||
"""Basic benchmark scenarios for Keystone."""
|
||||
|
||||
@validation.number("name_length", minval=10)
|
||||
@validation.required_openstack(admin=True)
|
||||
@base.scenario(context={"admin_cleanup": ["keystone"]})
|
||||
def create_user(self, name_length=10, **kwargs):
|
||||
"""Create a keystone user with random name.
|
||||
|
||||
:param name_length: length of the random part of user name
|
||||
:param kwargs: Other optional parameters to create users like
|
||||
"tenant_id", "enabled".
|
||||
"""
|
||||
self._user_create(name_length=name_length, **kwargs)
|
||||
|
||||
@validation.number("name_length", minval=10)
|
||||
@validation.required_openstack(admin=True)
|
||||
@base.scenario(context={"admin_cleanup": ["keystone"]})
|
||||
def create_delete_user(self, name_length=10, **kwargs):
|
||||
"""Create a keystone user with random name and then delete it.
|
||||
|
||||
:param name_length: length of the random part of user name
|
||||
:param kwargs: Other optional parameters to create users like
|
||||
"tenant_id", "enabled".
|
||||
"""
|
||||
user = self._user_create(name_length=name_length, **kwargs)
|
||||
self._resource_delete(user)
|
||||
|
||||
@ -37,6 +50,11 @@ class KeystoneBasic(kutils.KeystoneScenario):
|
||||
@validation.required_openstack(admin=True)
|
||||
@base.scenario(context={"admin_cleanup": ["keystone"]})
|
||||
def create_tenant(self, name_length=10, **kwargs):
|
||||
"""Create a keystone tenant with random name.
|
||||
|
||||
:param name_length: length of the random part of tenant name
|
||||
:param kwargs: Other optional parameters
|
||||
"""
|
||||
self._tenant_create(name_length=name_length, **kwargs)
|
||||
|
||||
@validation.number("name_length", minval=10)
|
||||
@ -45,6 +63,13 @@ class KeystoneBasic(kutils.KeystoneScenario):
|
||||
@base.scenario(context={"admin_cleanup": ["keystone"]})
|
||||
def create_tenant_with_users(self, users_per_tenant, name_length=10,
|
||||
**kwargs):
|
||||
"""Create a keystone tenant and several users belonging to it.
|
||||
|
||||
:param name_length: length of the random part of tenant/user name
|
||||
:param users_per_tenant: number of users to create for the tenant
|
||||
:param kwargs: Other optional parameters for tenant creation
|
||||
:returns: keystone tenant instance
|
||||
"""
|
||||
tenant = self._tenant_create(name_length=name_length, **kwargs)
|
||||
self._users_create(tenant, users_per_tenant=users_per_tenant,
|
||||
name_length=name_length)
|
||||
@ -53,6 +78,12 @@ class KeystoneBasic(kutils.KeystoneScenario):
|
||||
@validation.required_openstack(admin=True)
|
||||
@base.scenario(context={"admin_cleanup": ["keystone"]})
|
||||
def create_and_list_users(self, name_length=10, **kwargs):
|
||||
"""Create a keystone user with random name and list all users.
|
||||
|
||||
:param name_length: length of the random part of user name
|
||||
:param kwargs: Other optional parameters to create users like
|
||||
"tenant_id", "enabled".
|
||||
"""
|
||||
self._user_create(name_length=name_length, **kwargs)
|
||||
self._list_users()
|
||||
|
||||
@ -60,5 +91,10 @@ class KeystoneBasic(kutils.KeystoneScenario):
|
||||
@validation.required_openstack(admin=True)
|
||||
@base.scenario(context={"admin_cleanup": ["keystone"]})
|
||||
def create_and_list_tenants(self, name_length=10, **kwargs):
|
||||
"""Create a keystone tenant with random name and list all tenants.
|
||||
|
||||
:param name_length: length of the random part of tenant name
|
||||
:param kwargs: Other optional parameters
|
||||
"""
|
||||
self._tenant_create(name_length=name_length, **kwargs)
|
||||
self._list_tenants()
|
||||
|
@ -23,6 +23,8 @@ def is_temporary(resource):
|
||||
|
||||
|
||||
class KeystoneScenario(base.Scenario):
|
||||
"""Base class for Keystone scenarios with basic atomic actions."""
|
||||
|
||||
RESOURCE_NAME_PREFIX = "rally_keystone_"
|
||||
|
||||
@base.atomic_action_timer('keystone.create_user')
|
||||
@ -30,9 +32,9 @@ class KeystoneScenario(base.Scenario):
|
||||
"""Creates keystone user with random name.
|
||||
|
||||
:param name_length: length of generated (random) part of name
|
||||
:param **kwargs: Other optional parameters to create users like
|
||||
:param kwargs: Other optional parameters to create users like
|
||||
"tenant_id", "enabled".
|
||||
:return: keystone user instance
|
||||
:returns: keystone user instance
|
||||
"""
|
||||
name = self._generate_random_name(length=name_length)
|
||||
# NOTE(boris-42): password and email parameters are required by
|
||||
@ -54,8 +56,8 @@ class KeystoneScenario(base.Scenario):
|
||||
"""Creates keystone tenant with random name.
|
||||
|
||||
:param name_length: length of generated (random) part of name
|
||||
:param **kwargs: Other optional parameters
|
||||
:return: keystone tenant instance
|
||||
:param kwargs: Other optional parameters
|
||||
:returns: keystone tenant instance
|
||||
"""
|
||||
name = self._generate_random_name(length=name_length)
|
||||
return self.admin_clients("keystone").tenants.create(name, **kwargs)
|
||||
@ -64,6 +66,7 @@ class KeystoneScenario(base.Scenario):
|
||||
def _users_create(self, tenant, users_per_tenant, name_length=10):
|
||||
"""Adds users to a tenant.
|
||||
|
||||
:param tenant: tenant object
|
||||
:param users_per_tenant: number of users in per tenant
|
||||
:param name_length: length of generated (random) part of name for user
|
||||
"""
|
||||
@ -76,10 +79,10 @@ class KeystoneScenario(base.Scenario):
|
||||
|
||||
@base.atomic_action_timer('keystone.list_users')
|
||||
def _list_users(self):
|
||||
"""list users."""
|
||||
"""List users."""
|
||||
return self.admin_clients("keystone").users.list()
|
||||
|
||||
@base.atomic_action_timer('keystone.list_tenants')
|
||||
def _list_tenants(self):
|
||||
"""list tenants."""
|
||||
"""List tenants."""
|
||||
return self.admin_clients("keystone").tenants.list()
|
||||
|
@ -20,15 +20,15 @@ from rally import consts
|
||||
|
||||
|
||||
class NeutronNetworks(utils.NeutronScenario):
|
||||
"""Benchmark scenarios for Neutron."""
|
||||
|
||||
@validation.required_services(consts.Service.NEUTRON)
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario(context={"cleanup": ["neutron"]})
|
||||
def create_and_list_networks(self, network_create_args=None):
|
||||
"""Create a network and then listing all networks.
|
||||
"""Create a network and then list all networks.
|
||||
|
||||
This scenario is a very useful tool to measure
|
||||
the "neutron net-list" command performance.
|
||||
Measure the "neutron net-list" command performance.
|
||||
|
||||
If you have only 1 user in your context, you will
|
||||
add 1 network on every iteration. So you will have more
|
||||
@ -47,10 +47,9 @@ class NeutronNetworks(utils.NeutronScenario):
|
||||
def create_and_update_networks(self,
|
||||
network_update_args,
|
||||
network_create_args=None):
|
||||
"""Create a network and then update network.
|
||||
"""Create and update a network.
|
||||
|
||||
This scenario is a very useful tool to measure
|
||||
the "neutron net-create and net-update" command performance.
|
||||
Measure the "neutron net-create and net-update" command performance.
|
||||
|
||||
:param network_update_args: dict, PUT /v2.0/networks update request
|
||||
:param network_create_args: dict, POST /v2.0/networks request options
|
||||
@ -61,12 +60,11 @@ class NeutronNetworks(utils.NeutronScenario):
|
||||
@validation.required_services(consts.Service.NEUTRON)
|
||||
@base.scenario(context={"cleanup": ["neutron"]})
|
||||
def create_and_delete_networks(self, network_create_args=None):
|
||||
"""Create a network and then deleting it.
|
||||
"""Create and delete a network.
|
||||
|
||||
This scenario is a very useful tool to measure
|
||||
the "neutron net-create" and "net-delete" command performance.
|
||||
Measure the "neutron net-create" and "net-delete" command performance.
|
||||
|
||||
:param network_create_agrs: dict, POST /v2.0/networks request options
|
||||
:param network_create_args: dict, POST /v2.0/networks request options
|
||||
"""
|
||||
network = self._create_network(network_create_args or {})
|
||||
self._delete_network(network['network'])
|
||||
@ -80,7 +78,7 @@ class NeutronNetworks(utils.NeutronScenario):
|
||||
subnet_create_args=None,
|
||||
subnet_cidr_start=None,
|
||||
subnets_per_network=None):
|
||||
"""Test creating and listing a given number of subnets.
|
||||
"""Create and a given number of subnets and list all subnets.
|
||||
|
||||
The scenario creates a network, a given number of subnets and then
|
||||
lists subnets.
|
||||
@ -106,10 +104,10 @@ class NeutronNetworks(utils.NeutronScenario):
|
||||
subnet_create_args=None,
|
||||
subnet_cidr_start=None,
|
||||
subnets_per_network=None):
|
||||
"""Create a subnet and then update subnet.
|
||||
"""Create and update a subnet.
|
||||
|
||||
The scenario creates a network, a given number of subnets
|
||||
and then updates the subnet. This scenario measure the
|
||||
and then updates the subnet. This scenario measures the
|
||||
"neutron subnet-update" command performance.
|
||||
|
||||
:param subnet_update_args: dict, PUT /v2.0/subnets update options
|
||||
@ -135,7 +133,7 @@ class NeutronNetworks(utils.NeutronScenario):
|
||||
subnet_create_args=None,
|
||||
subnet_cidr_start=None,
|
||||
subnets_per_network=None):
|
||||
"""Test creating and deleting a given number of subnets.
|
||||
"""Create and delete a given number of subnets.
|
||||
|
||||
The scenario creates a network, a given number of subnets and then
|
||||
deletes subnets.
|
||||
@ -164,7 +162,7 @@ class NeutronNetworks(utils.NeutronScenario):
|
||||
subnet_cidr_start=None,
|
||||
subnets_per_network=None,
|
||||
router_create_args=None):
|
||||
"""Test creating and listing a given number of routers.
|
||||
"""Create and a given number of routers and list all routers.
|
||||
|
||||
Create a network, a given number of subnets and routers
|
||||
and then list all routers.
|
||||
@ -200,7 +198,7 @@ class NeutronNetworks(utils.NeutronScenario):
|
||||
subnet_cidr_start=None,
|
||||
subnets_per_network=None,
|
||||
router_create_args=None):
|
||||
"""Test creating and updating a given number of routers.
|
||||
"""Create and update a given number of routers.
|
||||
|
||||
Create a network, a given number of subnets and routers
|
||||
and then updating all routers.
|
||||
@ -233,7 +231,7 @@ class NeutronNetworks(utils.NeutronScenario):
|
||||
network_create_args=None,
|
||||
port_create_args=None,
|
||||
ports_per_network=None):
|
||||
"""Test creating and listing a given number of ports.
|
||||
"""Create and a given number of ports and list all ports.
|
||||
|
||||
:param network_create_args: dict, POST /v2.0/networks request options
|
||||
:param port_create_args: dict, POST /v2.0/ports request options
|
||||
@ -254,11 +252,10 @@ class NeutronNetworks(utils.NeutronScenario):
|
||||
network_create_args=None,
|
||||
port_create_args=None,
|
||||
ports_per_network=None):
|
||||
"""Test creating and updating a given number of ports.
|
||||
"""Create and update a given number of ports.
|
||||
|
||||
This scenario is a very useful tool to measure
|
||||
the "neutron port-create" and
|
||||
"neutron port-update" command performance.
|
||||
Measure the "neutron port-create" and "neutron port-update" commands
|
||||
performance.
|
||||
|
||||
:param port_update_args: dict, PUT /v2.0/ports update request options
|
||||
:param network_create_args: dict, POST /v2.0/networks request options
|
||||
@ -277,11 +274,10 @@ class NeutronNetworks(utils.NeutronScenario):
|
||||
network_create_args=None,
|
||||
port_create_args=None,
|
||||
ports_per_network=None):
|
||||
"""Create a port and then deleting it.
|
||||
"""Create and delete a port.
|
||||
|
||||
This scenario is a very useful tool to measure
|
||||
the "neutron port-create" and
|
||||
"neutron port-delete" command performance.
|
||||
Measure the "neutron port-create" and "neutron port-delete" commands
|
||||
performance.
|
||||
|
||||
:param network_create_args: dict, POST /v2.0/networks request options
|
||||
:param port_create_args: dict, POST /v2.0/ports request options
|
||||
|
@@ -22,7 +22,7 @@ LOG = logging.getLogger(__name__)


class NeutronScenario(base.Scenario):
"""This class should contain base operations for benchmarking neutron."""
"""Base class for Neutron scenarios with basic atomic actions."""

RESOURCE_NAME_PREFIX = "rally_net_"
RESOURCE_NAME_LENGTH = 16

@@ -27,6 +27,7 @@ class NovaSecurityGroupException(exceptions.RallyException):


class NovaSecGroup(utils.NovaScenario):
"""Benchmark scenarios for Nova security groups."""

RESOURCE_NAME_PREFIX = "rally_novasecgrp_"

@@ -37,7 +38,7 @@ class NovaSecGroup(utils.NovaScenario):
@base.scenario(context={"cleanup": ["nova"]})
def create_and_delete_secgroups(self, security_group_count,
rules_per_security_group):
"""Tests creating and deleting security groups.
"""Create and delete security groups.

This scenario creates N security groups with M rules per group and then
deletes them.
@@ -61,7 +62,7 @@ class NovaSecGroup(utils.NovaScenario):
@base.scenario(context={"cleanup": ["nova"]})
def create_and_list_secgroups(self, security_group_count,
rules_per_security_group):
"""Tests creating and listing security groups.
"""Create and list security groups.

This scenario creates N security groups with M rules per group and then
lists them.
@@ -89,7 +90,7 @@ class NovaSecGroup(utils.NovaScenario):
def boot_and_delete_server_with_secgroups(self, image, flavor,
security_group_count,
rules_per_security_group):
"""Tests booting an image with security groups attached.
"""Boot and delete server with security groups attached.

Plan of this scenario:
- create N security groups with M rules per group

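To make the documented arguments concrete, a task entry for one of the security-group scenarios above could look like the following sketch; the runner and context values are illustrative assumptions and not part of this change:

NovaSecGroup.create_and_delete_secgroups:
  -
    args:
      security_group_count: 10
      rules_per_security_group: 10
    runner:
      type: "constant"
      times: 10
      concurrency: 2
    context:
      users:
        tenants: 2
        users_per_tenant: 2
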
@ -31,6 +31,7 @@ LOG = logging.getLogger(__name__)
|
||||
|
||||
class NovaServers(utils.NovaScenario,
|
||||
cinder_utils.CinderScenario):
|
||||
"""Benchmark scenarios for Nova servers."""
|
||||
|
||||
RESOURCE_NAME_PREFIX = "rally_novaserver_"
|
||||
RESOURCE_NAME_LENGTH = 16
|
||||
@ -43,16 +44,21 @@ class NovaServers(utils.NovaScenario,
|
||||
@base.scenario(context={"cleanup": ["nova"]})
|
||||
def boot_and_list_server(self, image, flavor,
|
||||
detailed=True, **kwargs):
|
||||
"""Tests booting an image and then listing servers.
|
||||
"""Boot a server from an image and then list all servers.
|
||||
|
||||
This scenario is a very useful tool to measure
|
||||
the "nova list" command performance.
|
||||
Measure the "nova list" command performance.
|
||||
|
||||
If you have only 1 user in your context, you will
|
||||
add 1 server on every iteration. So you will have more
|
||||
and more servers and will be able to measure the
|
||||
performance of the "nova list" command depending on
|
||||
the number of servers owned by users.
|
||||
If you have only 1 user in your context, you will
|
||||
add 1 server on every iteration. So you will have more
|
||||
and more servers and will be able to measure the
|
||||
performance of the "nova list" command depending on
|
||||
the number of servers owned by users.
|
||||
|
||||
:param image: image to be used to boot an instance
|
||||
:param flavor: flavor to be used to boot an instance
|
||||
:param detailed: True if the server listing should contain
|
||||
detailed information about all of them
|
||||
:param kwargs: Optional additional arguments for server creation
|
||||
"""
|
||||
self._boot_server(
|
||||
self._generate_random_name(), image, flavor, **kwargs)
|
||||
@ -62,7 +68,7 @@ class NovaServers(utils.NovaScenario,
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario(context={"cleanup": ["nova"]})
|
||||
def list_servers(self, detailed=True):
|
||||
"""Test the nova list command.
|
||||
"""List all servers.
|
||||
|
||||
This simple scenario test the nova list command by listing
|
||||
all the servers.
|
||||
@ -81,7 +87,19 @@ class NovaServers(utils.NovaScenario,
|
||||
def boot_and_delete_server(self, image, flavor,
|
||||
min_sleep=0, max_sleep=0,
|
||||
force_delete=False, **kwargs):
|
||||
"""Tests booting and then deleting an image."""
|
||||
"""Boot and delete a server.
|
||||
|
||||
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
|
||||
to simulate a pause between volume creation and deletion
|
||||
(of random duration from [min_sleep, max_sleep]).
|
||||
|
||||
:param image: image to be used to boot an instance
|
||||
:param flavor: flavor to be used to boot an instance
|
||||
:param min_sleep: Minimum sleep time in seconds (non-negative)
|
||||
:param max_sleep: Maximum sleep time in seconds (non-negative)
|
||||
:param force_delete: True if force_delete should be used
|
||||
:param kwargs: Optional additional arguments for server creation
|
||||
"""
|
||||
server = self._boot_server(
|
||||
self._generate_random_name(), image, flavor, **kwargs)
|
||||
self.sleep_between(min_sleep, max_sleep)
|
||||
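As an illustration of the min_sleep/max_sleep parameters documented above, a task entry might look like the sketch below; the image and flavor names are placeholders borrowed from the usual sample files, not mandated by this patch:

NovaServers.boot_and_delete_server:
  -
    args:
      flavor:
        name: "m1.tiny"
      image:
        name: "^cirros.*uec$"
      min_sleep: 5
      max_sleep: 15
    runner:
      type: "constant"
      times: 10
      concurrency: 2
    context:
      users:
        tenants: 3
        users_per_tenant: 2
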
@ -97,7 +115,21 @@ class NovaServers(utils.NovaScenario,
|
||||
volume_size,
|
||||
min_sleep=0, max_sleep=0,
|
||||
force_delete=False, **kwargs):
|
||||
"""Tests booting from volume and then deleting an image and volume."""
|
||||
"""Boot a server from volume and then delete it.
|
||||
|
||||
The scenario first creates a volume and then a server.
|
||||
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
|
||||
to simulate a pause between volume creation and deletion
|
||||
(of random duration from [min_sleep, max_sleep]).
|
||||
|
||||
:param image: image to be used to boot an instance
|
||||
:param flavor: flavor to be used to boot an instance
|
||||
:param volume_size: volume size (in GB)
|
||||
:param min_sleep: Minimum sleep time in seconds (non-negative)
|
||||
:param max_sleep: Maximum sleep time in seconds (non-negative)
|
||||
:param force_delete: True if force_delete should be used
|
||||
:param kwargs: Optional additional arguments for server creation
|
||||
"""
|
||||
volume = self._create_volume(volume_size, imageRef=image)
|
||||
block_device_mapping = {'vda': '%s:::1' % volume.id}
|
||||
server = self._boot_server(self._generate_random_name(),
|
||||
@@ -114,15 +146,24 @@ class NovaServers(utils.NovaScenario,
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["nova"]})
def boot_and_bounce_server(self, image, flavor,
force_delete=False, **kwargs):
"""Test booting a server with further performing specified actions.
force_delete=False, actions=None, **kwargs):
"""Boot a server and run specified actions against it.

Actions should be passed into kwargs. Available actions are
'hard_reboot', 'soft_reboot', 'stop_start' and 'rescue_unrescue'.
Delete server after all actions.
Actions should be passed into the actions parameter. Available actions
are 'hard_reboot', 'soft_reboot', 'stop_start' and 'rescue_unrescue'.
Delete server after all actions were completed.

:param image: image to be used to boot an instance
:param flavor: flavor to be used to boot an instance
:param force_delete: True if force_delete should be used
:param actions: list of action dictionaries, where each action
dictionary specifies an action to be performed
in the following format:
{"action_name": <no_of_iterations>}
:param kwargs: Optional additional arguments for server creation
"""
action_builder = self._bind_actions()
actions = kwargs.get('actions', [])
actions = actions or []
try:
action_builder.validate(actions)
except jsonschema.exceptions.ValidationError as error:
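Since the 'actions' argument is now an explicit parameter, a task entry exercising it could look like this sketch; the flavor, image and runner values are illustrative assumptions, while the action names and format come from the docstring above:

NovaServers.boot_and_bounce_server:
  -
    args:
      flavor:
        name: "m1.tiny"
      image:
        name: "^cirros.*uec$"
      actions:
        -
          hard_reboot: 1
        -
          stop_start: 2
    runner:
      type: "constant"
      times: 5
      concurrency: 1
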
@ -143,7 +184,13 @@ class NovaServers(utils.NovaScenario,
|
||||
@base.scenario(context={"cleanup": ["nova", "glance"]})
|
||||
def snapshot_server(self, image, flavor,
|
||||
force_delete=False, **kwargs):
|
||||
"""Tests Nova instance snapshotting."""
|
||||
"""Boot a server, make its snapshot and delete both.
|
||||
|
||||
:param image: image to be used to boot an instance
|
||||
:param flavor: flavor to be used to boot an instance
|
||||
:param force_delete: True if force_delete should be used
|
||||
:param kwargs: Optional additional arguments for server creation
|
||||
"""
|
||||
server_name = self._generate_random_name()
|
||||
|
||||
server = self._boot_server(server_name, image, flavor, **kwargs)
|
||||
@ -161,7 +208,15 @@ class NovaServers(utils.NovaScenario,
|
||||
@validation.required_openstack(users=True)
|
||||
@base.scenario(context={"cleanup": ["nova"]})
|
||||
def boot_server(self, image, flavor, auto_assign_nic=False, **kwargs):
|
||||
"""Test VM boot - assumed clean-up is done elsewhere."""
|
||||
"""Boot a server.
|
||||
|
||||
Assumes that cleanup is done elsewhere.
|
||||
|
||||
:param image: image to be used to boot an instance
|
||||
:param flavor: flavor to be used to boot an instance
|
||||
:param auto_assign_nic: True if NICs should be assigned
|
||||
:param kwargs: Optional additional arguments for server creation
|
||||
"""
|
||||
server_name = self._generate_random_name()
|
||||
self._boot_server(server_name, image, flavor, auto_assign_nic,
|
||||
**kwargs)
|
||||
@ -174,7 +229,17 @@ class NovaServers(utils.NovaScenario,
|
||||
@base.scenario(context={"cleanup": ["nova", "cinder"]})
|
||||
def boot_server_from_volume(self, image, flavor, volume_size,
|
||||
auto_assign_nic=False, **kwargs):
|
||||
"""Test VM boot from volume - assumed clean-up is done elsewhere."""
|
||||
"""Boot a server from volume.
|
||||
|
||||
The scenario first creates a volume and then a server.
|
||||
Assumes that cleanup is done elsewhere.
|
||||
|
||||
:param image: image to be used to boot an instance
|
||||
:param flavor: flavor to be used to boot an instance
|
||||
:param volume_size: volume size (in GB)
|
||||
:param auto_assign_nic: True if NICs should be assigned
|
||||
:param kwargs: Optional additional arguments for server creation
|
||||
"""
|
||||
volume = self._create_volume(volume_size, imageRef=image)
|
||||
block_device_mapping = {'vda': '%s:::1' % volume.id}
|
||||
self._boot_server(self._generate_random_name(),
|
||||
@ -232,7 +297,17 @@ class NovaServers(utils.NovaScenario,
|
||||
@base.scenario(context={"cleanup": ["nova"]})
|
||||
def resize_server(self, image, flavor, to_flavor,
|
||||
force_delete=False, **kwargs):
|
||||
"""Tests resize serveri."""
|
||||
"""Boot a server, then resize and delete it.
|
||||
|
||||
This scenario boots a server, resizes it and then deletes it.
|
||||
|
||||
:param image: image to be used to boot an instance
|
||||
:param flavor: flavor to be used to boot an instance
|
||||
:param to_flavor: flavor to be used to resize the booted instance
|
||||
:param force_delete: True if force_delete should be used
|
||||
:param kwargs: Optional additional arguments for server creation
|
||||
"""
|
||||
server = self._boot_server(self._generate_random_name(),
|
||||
image, flavor, **kwargs)
|
||||
self._resize(server, to_flavor)
|
||||
@ -253,17 +328,18 @@ class NovaServers(utils.NovaScenario,
|
||||
def boot_and_live_migrate_server(self, image,
|
||||
flavor, block_migration=False,
|
||||
disk_over_commit=False, **kwargs):
|
||||
"""Tests VM Live Migration.
|
||||
"""Live Migrate a server.
|
||||
|
||||
This scenario launches a VM on a compute node available in
|
||||
the availability zone and then migrates the VM to another
|
||||
compute node on the same availability zone.
|
||||
|
||||
:param image: Glance image to be used to launch an instance
|
||||
:param flavor: Nova flavor to be used to launch an instance
|
||||
:param image: image to be used to boot an instance
|
||||
:param flavor: flavor to be used to boot an instance
|
||||
:param block_migration: Specifies the migration type
|
||||
:param disk_over_commit: Specifies whether to allow overcommit
|
||||
on migrated instance or not
|
||||
:param kwargs: Optional additional arguments for server creation
|
||||
"""
|
||||
server = self._boot_server(self._generate_random_name(),
|
||||
image, flavor, **kwargs)
|
||||
|
@ -70,17 +70,17 @@ CONF.register_opts(nova_benchmark_opts, group=benchmark_group)
|
||||
|
||||
|
||||
class NovaScenario(base.Scenario):
|
||||
"""Base class for Nova scenarios with basic atomic actions."""
|
||||
|
||||
@base.atomic_action_timer('nova.list_servers')
|
||||
def _list_servers(self, detailed=True):
|
||||
"""Returns user servers list."""
|
||||
|
||||
return self.clients("nova").servers.list(detailed)
|
||||
|
||||
@base.atomic_action_timer("nova.boot_server")
|
||||
def _boot_server(self, server_name, image_id, flavor_id,
|
||||
auto_assign_nic=False, **kwargs):
|
||||
"""Boots a server.
|
||||
"""Boot a server.
|
||||
|
||||
Returns when the server is actually booted and in "ACTIVE" state.
|
||||
|
||||
@ -91,7 +91,7 @@ class NovaScenario(base.Scenario):
|
||||
:param image_id: int, image ID for server creation
|
||||
:param flavor_id: int, flavor ID for server creation
|
||||
:param auto_assign_nic: bool, whether or not to auto assign NICs
|
||||
:param **kwargs: other optional parameters to initialize the server
|
||||
:param kwargs: other optional parameters to initialize the server
|
||||
:returns: nova Server instance
|
||||
"""
|
||||
allow_ssh_secgroup = self.context.get("allow_ssh")
|
||||
@ -139,7 +139,7 @@ class NovaScenario(base.Scenario):
|
||||
|
||||
@base.atomic_action_timer('nova.soft_reboot_server')
|
||||
def _soft_reboot_server(self, server):
|
||||
"""Reboots the given server using soft reboot.
|
||||
"""Reboot a server with soft reboot.
|
||||
|
||||
A soft reboot will be issued on the given server upon which time
|
||||
this method will wait for the server to become active.
|
||||
@ -150,7 +150,7 @@ class NovaScenario(base.Scenario):
|
||||
|
||||
@base.atomic_action_timer('nova.reboot_server')
|
||||
def _reboot_server(self, server):
|
||||
"""Reboots the given server using hard reboot.
|
||||
"""Reboot a server with hard reboot.
|
||||
|
||||
A reboot will be issued on the given server upon which time
|
||||
this method will wait for the server to become active.
|
||||
@ -161,7 +161,7 @@ class NovaScenario(base.Scenario):
|
||||
|
||||
@base.atomic_action_timer('nova.start_server')
|
||||
def _start_server(self, server):
|
||||
"""Starts the given server.
|
||||
"""Start the given server.
|
||||
|
||||
A start will be issued for the given server upon which time
|
||||
this method will wait for it to become ACTIVE.
|
||||
@ -247,7 +247,7 @@ class NovaScenario(base.Scenario):
|
||||
)
|
||||
|
||||
def _delete_server(self, server, force=False):
|
||||
"""Deletes the given server.
|
||||
"""Delete the given server.
|
||||
|
||||
Returns when the server is actually deleted.
|
||||
|
||||
@ -269,7 +269,7 @@ class NovaScenario(base.Scenario):
|
||||
)
|
||||
|
||||
def _delete_all_servers(self, force=False):
|
||||
"""Deletes all servers in current tenant.
|
||||
"""Delete all servers in the current tenant.
|
||||
|
||||
:param force: If True, force_delete will be used instead of delete.
|
||||
"""
|
||||
@ -282,7 +282,7 @@ class NovaScenario(base.Scenario):
|
||||
|
||||
@base.atomic_action_timer('nova.delete_image')
|
||||
def _delete_image(self, image):
|
||||
"""Deletes the given image.
|
||||
"""Delete the given image.
|
||||
|
||||
Returns when the image is actually deleted.
|
||||
|
||||
@ -299,7 +299,7 @@ class NovaScenario(base.Scenario):
|
||||
|
||||
@base.atomic_action_timer('nova.create_image')
|
||||
def _create_image(self, server):
|
||||
"""Creates an image of the given server
|
||||
"""Create an image from the given server
|
||||
|
||||
Uses the server name to name the created image. Returns when the image
|
||||
is actually created and is in the "Active" state.
|
||||
@ -324,7 +324,7 @@ class NovaScenario(base.Scenario):
|
||||
@base.atomic_action_timer('nova.boot_servers')
|
||||
def _boot_servers(self, name_prefix, image_id, flavor_id,
|
||||
requests, instances_amount=1, **kwargs):
|
||||
"""Boots multiple servers.
|
||||
"""Boot multiple servers.
|
||||
|
||||
Returns when all the servers are actually booted and are in the
|
||||
"Active" state.
|
||||
@ -362,12 +362,12 @@ class NovaScenario(base.Scenario):
|
||||
|
||||
@base.atomic_action_timer('nova.list_floating_ip_pools')
|
||||
def _list_floating_ip_pools(self):
|
||||
"""Returns user floating ip pools list."""
|
||||
"""Return user floating ip pools list."""
|
||||
return self.clients("nova").floating_ip_pools.list()
|
||||
|
||||
@base.atomic_action_timer('nova.list_floating_ips')
|
||||
def _list_floating_ips(self):
|
||||
"""Returns user floating ips list."""
|
||||
"""Return user floating ips list."""
|
||||
return self.clients("nova").floating_ips.list()
|
||||
|
||||
@base.atomic_action_timer('nova.create_floating_ip')
|
||||
@ -440,7 +440,7 @@ class NovaScenario(base.Scenario):
|
||||
|
||||
@base.atomic_action_timer('nova.list_networks')
|
||||
def _list_networks(self):
|
||||
"""Returns user networks list."""
|
||||
"""Return user networks list."""
|
||||
return self.clients("nova").networks.list()
|
||||
|
||||
@base.atomic_action_timer('nova.resize')
|
||||
@ -512,12 +512,12 @@ class NovaScenario(base.Scenario):
|
||||
@base.atomic_action_timer('nova.live_migrate')
|
||||
def _live_migrate(self, server, target_host, block_migration=False,
|
||||
disk_over_commit=False, skip_host_check=False):
|
||||
"""Live Migration of an specified server(Instance).
|
||||
"""Run live migration of the given server.
|
||||
|
||||
:param server: Server object
|
||||
:param target_host: Specifies the target compute node to migrate
|
||||
:param block_migration: Specifies the migration type
|
||||
:Param disk_over_commit: Specifies whether to overcommit migrated
|
||||
:param disk_over_commit: Specifies whether to overcommit migrated
|
||||
instance or not
|
||||
:param skip_host_check: Specifies whether to verify the targeted host
|
||||
availability
|
||||
@ -544,7 +544,7 @@ class NovaScenario(base.Scenario):
|
||||
|
||||
@base.atomic_action_timer('nova.find_host_to_migrate')
|
||||
def _find_host_to_migrate(self, server):
|
||||
"""Finds a compute node for live migration.
|
||||
"""Find a compute node for live migration.
|
||||
|
||||
:param server: Server object
|
||||
"""
|
||||
@ -600,7 +600,6 @@ class NovaScenario(base.Scenario):
|
||||
self.clients("nova").security_groups.delete(sg.id)
|
||||
|
||||
def _list_security_groups(self):
|
||||
"""Returns security groups list."""
|
||||
|
||||
"""Return security groups list."""
|
||||
with base.AtomicAction(self, "nova.list_security_groups"):
|
||||
return self.clients("nova").security_groups.list()
|
||||
|
@@ -20,12 +20,13 @@ from rally import consts


class Quotas(utils.QuotasScenario):
"""Benchmark scenarios for quotas."""

@validation.required_services(consts.Service.NOVA)
@validation.required_openstack(admin=True, users=True)
@base.scenario(context={"admin_cleanup": ["nova.quotas"]})
def nova_update(self, max_quota=1024):
"""Tests updating quotas for nova.
"""Update quotas for Nova.

:param max_quota: Max value to be updated for quota.
"""
@@ -36,7 +37,7 @@ class Quotas(utils.QuotasScenario):
@validation.required_openstack(admin=True, users=True)
@base.scenario(context={"admin_cleanup": ["nova.quotas"]})
def nova_update_and_delete(self, max_quota=1024):
"""Tests updating and deleting quotas for nova.
"""Update and delete quotas for Nova.

:param max_quota: Max value to be updated for quota.
"""
@@ -49,7 +50,7 @@ class Quotas(utils.QuotasScenario):
@validation.required_openstack(admin=True, users=True)
@base.scenario(context={"admin_cleanup": ["cinder.quotas"]})
def cinder_update(self, max_quota=1024):
"""Tests updating quotas for cinder.
"""Update quotas for Cinder.

:param max_quota: Max value to be updated for quota.
"""
@@ -60,7 +61,7 @@ class Quotas(utils.QuotasScenario):
@validation.required_openstack(admin=True, users=True)
@base.scenario(context={"admin_cleanup": ["cinder.quotas"]})
def cinder_update_and_delete(self, max_quota=1024):
"""Tests updating and deleting quotas for cinder.
"""Update and delete quotas for Cinder.

:param max_quota: Max value to be updated for quota.
"""

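A minimal task entry for the Nova quotas scenario, assuming the default max_quota documented above (runner and context values are illustrative):

Quotas.nova_update:
  -
    args:
      max_quota: 1024
    runner:
      type: "constant"
      times: 10
      concurrency: 2
    context:
      users:
        tenants: 3
        users_per_tenant: 2
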
@ -19,10 +19,11 @@ from rally.benchmark.scenarios import base
|
||||
|
||||
|
||||
class QuotasScenario(base.Scenario):
|
||||
"""Base class for quotas scenarios with basic atomic actions."""
|
||||
|
||||
@base.atomic_action_timer('quotas.update_quotas')
|
||||
def _update_quotas(self, component, tenant_id, max_quota=1024):
|
||||
"""Updates quotas.
|
||||
"""Update quotas.
|
||||
|
||||
:param component: Component for the quotas.
|
||||
:param tenant_id: The project_id for the quotas to be updated.
|
||||
@ -35,7 +36,7 @@ class QuotasScenario(base.Scenario):
|
||||
|
||||
@base.atomic_action_timer('quotas.delete_quotas')
|
||||
def _delete_quotas(self, component, tenant_id):
|
||||
"""Deletes quotas.
|
||||
"""Delete quotas.
|
||||
|
||||
:param component: Component for the quotas.
|
||||
:param tenant_id: The project_id for the quotas to be updated.
|
||||
|
@@ -22,7 +22,7 @@ class WrongStatusException(exceptions.RallyException):


class Requests(base.Scenario):
"""This class should contain all the http_request scenarios."""
"""Benchmark scenarios for HTTP requests."""

@base.scenario()
def check_response(self, url, response=None):
@@ -32,7 +32,7 @@ class Requests(base.Scenario):
Response.

:param url: URL to be fetched
:param response: Expected Response Code
:param response: expected response code
"""
resp = requests.head(url)
if response and response != resp.status_code:

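For reference, the check_response scenario can be driven by a task entry like the sketch below; the URL and runner settings are placeholders, only the argument names come from the docstring above:

Requests.check_response:
  -
    args:
      url: "http://www.example.com"
      response: 200
    runner:
      type: "constant"
      times: 20
      concurrency: 5
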
@ -24,6 +24,7 @@ LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SaharaClusters(utils.SaharaScenario):
|
||||
"""Benchmark scenarios for Sahara clusters."""
|
||||
|
||||
@types.set(flavor=types.FlavorResourceType,
|
||||
neutron_net=types.NeutronNetworkResourceType,
|
||||
@ -40,37 +41,39 @@ class SaharaClusters(utils.SaharaScenario):
|
||||
volumes_size=None, auto_security_group=None,
|
||||
security_groups=None, node_configs=None,
|
||||
cluster_configs=None):
|
||||
"""Test the Sahara Cluster launch and delete commands.
|
||||
"""Launch and delete a Sahara Cluster.
|
||||
|
||||
This scenario launches a Hadoop cluster, waits until it becomes
|
||||
'Active' and deletes it.
|
||||
|
||||
:param flavor: The Nova flavor that will be for nodes in the
|
||||
created node groups
|
||||
:param node_count: The total number of instances in a cluster (>= 2)
|
||||
:param plugin_name: The name of a provisioning plugin
|
||||
:param hadoop_version: The version of Hadoop distribution supported by
|
||||
the specified plugin.
|
||||
:param floating_ip_pool: The floating ip pool name from which Floating
|
||||
IPs will be allocated. Sahara will determine automatically how to treat
|
||||
this depending on it's own configurations. Defaults to None because in
|
||||
some cases Sahara may work w/o Floating IPs.
|
||||
:param neutron_net: The id or name of a Neutron network that
|
||||
will be used for fixed IPs. This parameter is ignored when Nova Network
|
||||
is set up.
|
||||
:param volumes_per_node: The number of Cinder volumes that will be
|
||||
attached to every cluster node
|
||||
:param volumes_size: The size of each Cinder volume in GB
|
||||
:param auto_security_group: Boolean value. If set to True Sahara will
|
||||
create a Security Group for each Node Group in the Cluster
|
||||
automatically.
|
||||
:param security_groups: The list of security groups that will be used
|
||||
while creating VMs. If auto_security_group is set to True this list
|
||||
can be left empty.
|
||||
:param node_configs: The configs dict that will be passed to each Node
|
||||
Group
|
||||
:param cluster_configs: The configs dict that will be passed to the
|
||||
Cluster
|
||||
:param flavor: Nova flavor that will be used for nodes in the
|
||||
created node groups
|
||||
:param node_count: total number of instances in a cluster (>= 2)
|
||||
:param plugin_name: name of a provisioning plugin
|
||||
:param hadoop_version: version of Hadoop distribution supported by
|
||||
the specified plugin.
|
||||
:param floating_ip_pool: floating ip pool name from which Floating
|
||||
IPs will be allocated. Sahara will determine
|
||||
automatically how to treat this depending on
|
||||
its own configurations. Defaults to None
|
||||
because in some cases Sahara may work w/o
|
||||
Floating IPs.
|
||||
:param neutron_net: id or name of a Neutron network that
|
||||
will be used for fixed IPs. This parameter is
|
||||
ignored when Nova Network is set up.
|
||||
:param volumes_per_node: number of Cinder volumes that will be
|
||||
attached to every cluster node
|
||||
:param volumes_size: size of each Cinder volume in GB
|
||||
:param auto_security_group: boolean value. If set to True Sahara will
|
||||
create a Security Group for each Node Group
|
||||
in the Cluster automatically.
|
||||
:param security_groups: list of security groups that will be used
|
||||
while creating VMs. If auto_security_group
|
||||
is set to True, this list can be left empty.
|
||||
:param node_configs: config dict that will be passed to each Node
|
||||
Group
|
||||
:param cluster_configs: config dict that will be passed to the
|
||||
Cluster
|
||||
"""
|
||||
|
||||
image_id = self.context["tenant"]["sahara_image"]
|
||||
@ -107,7 +110,7 @@ class SaharaClusters(utils.SaharaScenario):
|
||||
auto_security_group=None,
|
||||
security_groups=None, node_configs=None,
|
||||
cluster_configs=None):
|
||||
"""Test the Sahara Cluster launch, scale and delete commands.
|
||||
"""Launch, scale and delete a Sahara Cluster.
|
||||
|
||||
This scenario launches a Hadoop cluster, waits until it becomes
|
||||
'Active'. Then a series of scale operations is applied. The scaling
|
||||
@ -115,34 +118,36 @@ class SaharaClusters(utils.SaharaScenario):
|
||||
deltas is set to [2, -2] it means that the first scaling operation will
|
||||
add 2 worker nodes to the cluster and the second will remove two.
|
||||
|
||||
:param flavor: The Nova flavor that will be for nodes in the
|
||||
created node groups
|
||||
:param node_count: The total number of instances in a cluster (>= 2)
|
||||
:param plugin_name: The name of a provisioning plugin
|
||||
:param hadoop_version: The version of Hadoop distribution supported by
|
||||
the specified plugin.
|
||||
:param deltas: The list of integers which will be used to add or
|
||||
remove worker nodes from the cluster
|
||||
:param floating_ip_pool: The floating ip pool name from which Floating
|
||||
IPs will be allocated. Sahara will determine automatically how to treat
|
||||
this depending on it's own configurations. Defaults to None because in
|
||||
some cases Sahara may work w/o Floating IPs.
|
||||
:param neutron_net_id: The id of a Neutron network that
|
||||
will be used for fixed IPs. This parameter is ignored when Nova Network
|
||||
is set up.
|
||||
:param volumes_per_node: The number of Cinder volumes that will be
|
||||
attached to every cluster node
|
||||
:param volumes_size: The size of each Cinder volume in GB
|
||||
:param auto_security_group: Boolean value. If set to True Sahara will
|
||||
create a Security Group for each Node Group in the Cluster
|
||||
automatically.
|
||||
:param security_groups: The list of security groups that will be used
|
||||
while creating VMs. If auto_security_group is set to True this list
|
||||
can be left empty.
|
||||
:param node_configs: The configs dict that will be passed to each Node
|
||||
Group
|
||||
:param cluster_configs: The configs dict that will be passed to the
|
||||
Cluster
|
||||
:param flavor: Nova flavor that will be used for nodes in the
|
||||
created node groups
|
||||
:param node_count: total number of instances in a cluster (>= 2)
|
||||
:param plugin_name: name of a provisioning plugin
|
||||
:param hadoop_version: version of Hadoop distribution supported by
|
||||
the specified plugin.
|
||||
:param deltas: list of integers which will be used to add or
|
||||
remove worker nodes from the cluster
|
||||
:param floating_ip_pool: floating ip pool name from which Floating
|
||||
IPs will be allocated. Sahara will determine
|
||||
automatically how to treat this depending on
|
||||
it's own configurations. Defaults to None
|
||||
because in some cases Sahara may work w/o
|
||||
Floating IPs.
|
||||
:param neutron_net_id: id of a Neutron network that will be used
|
||||
for fixed IPs. This parameter is ignored when
|
||||
Nova Network is set up.
|
||||
:param volumes_per_node: number of Cinder volumes that will be
|
||||
attached to every cluster node
|
||||
:param volumes_size: size of each Cinder volume in GB
|
||||
:param auto_security_group: boolean value. If set to True Sahara will
|
||||
create a Security Group for each Node Group
|
||||
in the Cluster automatically.
|
||||
:param security_groups: list of security groups that will be used
|
||||
while creating VMs. If auto_security_group
|
||||
is set to True this list can be left empty.
|
||||
:param node_configs: configs dict that will be passed to each Node
|
||||
Group
|
||||
:param cluster_configs: configs dict that will be passed to the
|
||||
Cluster
|
||||
"""
|
||||
|
||||
image_id = self.context["tenant"]["sahara_image"]
|
||||
|
@ -23,22 +23,23 @@ LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SaharaJob(utils.SaharaScenario):
|
||||
"""Benchmark scenarios for Sahara jobs."""
|
||||
|
||||
@validation.required_services(consts.Service.SAHARA)
|
||||
@validation.required_contexts("users", "sahara_image", "sahara_edp",
|
||||
"sahara_cluster")
|
||||
@base.scenario(context={"cleanup": ["sahara"]})
|
||||
def create_launch_job(self, job_type, configs, job_idx=0):
|
||||
"""Test the Sahara EDP Job execution.
|
||||
|
||||
:param job_type: The type of the Data Processing Job
|
||||
:param configs: The configs dict that will be passed to a Job Execution
|
||||
:param job_idx: The index of a job in a sequence. This index will be
|
||||
used to create different atomic actions for each job in a sequence
|
||||
"""Create and execute a Sahara EDP Job.
|
||||
|
||||
This scenario creates a Job entity and launches an execution on a
|
||||
Cluster.
|
||||
|
||||
:param job_type: type of the Data Processing Job
|
||||
:param configs: config dict that will be passed to a Job Execution
|
||||
:param job_idx: index of a job in a sequence. This index will be
|
||||
used to create different atomic actions for each job
|
||||
in a sequence
|
||||
"""
|
||||
|
||||
mains = self.context["tenant"]["sahara_mains"]
|
||||
@ -72,13 +73,12 @@ class SaharaJob(utils.SaharaScenario):
|
||||
"sahara_cluster")
|
||||
@base.scenario(context={"cleanup": ["sahara"]})
|
||||
def create_launch_job_sequence(self, jobs):
|
||||
"""Test the Sahara EDP Job sequence execution.
|
||||
|
||||
:param jobs: The list of jobs that should be executed in one context
|
||||
"""Create and execute a sequence of the Sahara EDP Jobs.
|
||||
|
||||
This scenario creates a Job entity and launches an execution on a
|
||||
Cluster for every job object provided.
|
||||
|
||||
:param jobs: list of jobs that should be executed in one context
|
||||
"""
|
||||
|
||||
for idx, job in enumerate(jobs):
|
||||
@ -90,16 +90,15 @@ class SaharaJob(utils.SaharaScenario):
|
||||
"sahara_cluster")
|
||||
@base.scenario(context={"cleanup": ["sahara"]})
|
||||
def create_launch_job_sequence_with_scaling(self, jobs, deltas):
|
||||
"""Test the Sahara EDP Job sequence execution on a scaling Cluster.
|
||||
|
||||
:param jobs: The list of jobs that should be executed in one context
|
||||
:param deltas: The list of integers which will be used to add or
|
||||
remove worker nodes from the cluster
|
||||
"""Create and execute Sahara EDP Jobs on a scaling Cluster.
|
||||
|
||||
This scenario creates a Job entity and launches an execution on a
|
||||
Cluster for every job object provided. The Cluster is scaled according
|
||||
to the deltas values and the sequence is launched again
|
||||
to the deltas values and the sequence is launched again.
|
||||
|
||||
:param jobs: list of jobs that should be executed in one context
|
||||
:param deltas: list of integers which will be used to add or
|
||||
remove worker nodes from the cluster
|
||||
"""
|
||||
|
||||
cluster_id = self.context["tenant"]["sahara_cluster"]
|
||||
|
@ -21,6 +21,7 @@ from rally import consts
|
||||
|
||||
|
||||
class SaharaNodeGroupTemplates(utils.SaharaScenario):
|
||||
"""Benchmark scenarios for Sahara node group templates."""
|
||||
|
||||
@types.set(flavor=types.FlavorResourceType)
|
||||
@validation.flavor_exists('flavor')
|
||||
@ -30,7 +31,7 @@ class SaharaNodeGroupTemplates(utils.SaharaScenario):
|
||||
def create_and_list_node_group_templates(self, flavor,
|
||||
plugin_name="vanilla",
|
||||
hadoop_version="1.2.1"):
|
||||
"""Test the sahara Node Group Templates create and list commands.
|
||||
"""Create and list Sahara Node Group Templates.
|
||||
|
||||
This scenario creates two Node Group Templates with different set of
|
||||
node processes. The master Node Group Template contains Hadoop's
|
||||
@ -42,11 +43,11 @@ class SaharaNodeGroupTemplates(utils.SaharaScenario):
|
||||
|
||||
After the templates are created the list operation is called.
|
||||
|
||||
:param flavor: The Nova flavor that will be for nodes in the
|
||||
created node groups
|
||||
:param plugin_name: The name of a provisioning plugin
|
||||
:param hadoop_version: The version of Hadoop distribution supported by
|
||||
the specified plugin.
|
||||
:param flavor: Nova flavor that will be used for nodes in the
|
||||
created node groups
|
||||
:param plugin_name: name of a provisioning plugin
|
||||
:param hadoop_version: version of Hadoop distribution supported by
|
||||
the specified plugin.
|
||||
"""
|
||||
|
||||
self._create_master_node_group_template(flavor_id=flavor,
|
||||
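Using the defaults visible in the signature above, a task entry for this scenario could look like the following sketch; the flavor name, runner and context values are illustrative assumptions:

SaharaNodeGroupTemplates.create_and_list_node_group_templates:
  -
    args:
      flavor:
        name: "m1.small"
      plugin_name: "vanilla"
      hadoop_version: "1.2.1"
    runner:
      type: "constant"
      times: 10
      concurrency: 2
    context:
      users:
        tenants: 1
        users_per_tenant: 1
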
@ -65,7 +66,7 @@ class SaharaNodeGroupTemplates(utils.SaharaScenario):
|
||||
def create_delete_node_group_templates(self, flavor,
|
||||
plugin_name="vanilla",
|
||||
hadoop_version="1.2.1"):
|
||||
"""Test create and delete commands.
|
||||
"""Create and delete Sahara Node Group Templates.
|
||||
|
||||
This scenario creates and deletes two most common types of
|
||||
Node Group Templates.
|
||||
@ -73,11 +74,11 @@ class SaharaNodeGroupTemplates(utils.SaharaScenario):
|
||||
By default the templates are created for the vanilla Hadoop
|
||||
provisioning plugin using the version 1.2.1
|
||||
|
||||
:param flavor: The Nova flavor that will be for nodes in the
|
||||
created node groups
|
||||
:param plugin_name: The name of a provisioning plugin
|
||||
:param hadoop_version: The version of Hadoop distribution supported by
|
||||
the specified plugin.
|
||||
:param flavor: Nova flavor that will be used for nodes in the
|
||||
created node groups
|
||||
:param plugin_name: name of a provisioning plugin
|
||||
:param hadoop_version: version of Hadoop distribution supported by
|
||||
the specified plugin.
|
||||
"""
|
||||
|
||||
master_ngt = self._create_master_node_group_template(
|
||||
|
@ -40,6 +40,7 @@ CONF.register_opts(TIMEOUT_OPTS, group=benchmark_group)
|
||||
|
||||
|
||||
class SaharaScenario(base.Scenario):
|
||||
"""Base class for Sahara scenarios with basic atomic actions."""
|
||||
|
||||
RESOURCE_NAME_LENGTH = 20
|
||||
|
||||
@ -110,22 +111,20 @@ class SaharaScenario(base.Scenario):
|
||||
|
||||
@base.atomic_action_timer('sahara.list_node_group_templates')
|
||||
def _list_node_group_templates(self):
|
||||
"""Returns user Node Group Templates list."""
|
||||
|
||||
"""Return user Node Group Templates list."""
|
||||
return self.clients("sahara").node_group_templates.list()
|
||||
|
||||
@base.atomic_action_timer('sahara.create_master_node_group_template')
|
||||
def _create_master_node_group_template(self, flavor_id, plugin_name,
|
||||
hadoop_version):
|
||||
"""Creates a master Node Group Template with a random name.
|
||||
"""Create a master Node Group Template with a random name.
|
||||
|
||||
:param flavor_id: The required argument for the Template
|
||||
:param plugin_name: Sahara provisioning plugin name
|
||||
:param hadoop_version: The version of Hadoop distribution supported by
|
||||
the plugin
|
||||
:return: The created Template
|
||||
the plugin
|
||||
:returns: The created Template
|
||||
"""
|
||||
|
||||
name = self._generate_random_name(prefix="master-ngt-")
|
||||
|
||||
return self.clients("sahara").node_group_templates.create(
|
||||
@ -139,15 +138,14 @@ class SaharaScenario(base.Scenario):
|
||||
@base.atomic_action_timer('sahara.create_worker_node_group_template')
|
||||
def _create_worker_node_group_template(self, flavor_id, plugin_name,
|
||||
hadoop_version):
|
||||
"""Creates a worker Node Group Template with a random name.
|
||||
"""Create a worker Node Group Template with a random name.
|
||||
|
||||
:param flavor_id: The required argument for the Template
|
||||
:param plugin_name: Sahara provisioning plugin name
|
||||
:param hadoop_version: The version of Hadoop distribution supported by
|
||||
the plugin
|
||||
:return: The created Template
|
||||
the plugin
|
||||
:returns: The created Template
|
||||
"""
|
||||
|
||||
name = self._generate_random_name(prefix="worker-ngt-")
|
||||
|
||||
return self.clients("sahara").node_group_templates.create(
|
||||
@ -160,12 +158,10 @@ class SaharaScenario(base.Scenario):
|
||||
|
||||
@base.atomic_action_timer('sahara.delete_node_group_template')
|
||||
def _delete_node_group_template(self, node_group):
|
||||
"""Deletes a Node Group Template by id.
|
||||
"""Delete a Node Group Template by id.
|
||||
|
||||
:param node_group: The Node Group Template to be deleted
|
||||
:return:
|
||||
"""
|
||||
|
||||
self.clients("sahara").node_group_templates.delete(node_group.id)
|
||||
|
||||
def _wait_active(self, cluster_object):
|
||||
@ -182,39 +178,38 @@ class SaharaScenario(base.Scenario):
|
||||
volumes_size=None, auto_security_group=None,
|
||||
security_groups=None, node_configs=None,
|
||||
cluster_configs=None, wait_active=True):
|
||||
"""Creates a cluster and wait until it becomes Active.
|
||||
"""Create a cluster and wait until it becomes Active.
|
||||
|
||||
The cluster is created with two node groups. The master Node Group is
|
||||
created with one instance. The worker node group contains
|
||||
node_count - 1 instances.
|
||||
|
||||
:param plugin_name: The provisioning plugin name
|
||||
:param plugin_name: provisioning plugin name
|
||||
:param hadoop_version: Hadoop version supported by the plugin
|
||||
:param flavor_id: The flavor which will be used to create instances
|
||||
:param image_id: The image id that will be used to boot instances
|
||||
:param node_count: The total number of instances. 1 master node, others
|
||||
for the workers
|
||||
:param floating_ip_pool: The floating ip pool name from which Floating
|
||||
IPs will be allocated
|
||||
:param neutron_net_id: The network id to allocate Fixed IPs
|
||||
from, when Neutron is enabled for networking
|
||||
:param volumes_per_node: The number of Cinder volumes that will be
|
||||
attached to every cluster node
|
||||
:param volumes_size: The size of each Cinder volume in GB
|
||||
:param auto_security_group: Boolean value. If set to True Sahara will
|
||||
create a Security Group for each Node Group in the Cluster
|
||||
automatically.
|
||||
:param security_groups: The list of security groups that will be used
|
||||
while creating VMs. If auto_security_group is set to True this list
|
||||
can be left empty.
|
||||
:param node_configs: The configs dict that will be passed to each Node
|
||||
Group
|
||||
:param cluster_configs: The configs dict that will be passed to the
|
||||
Cluster
|
||||
:param flavor_id: flavor which will be used to create instances
|
||||
:param image_id: image id that will be used to boot instances
|
||||
:param node_count: total number of instances. 1 master node, others
|
||||
for the workers
|
||||
:param floating_ip_pool: floating ip pool name from which Floating
|
||||
IPs will be allocated
|
||||
:param neutron_net_id: network id to allocate Fixed IPs
|
||||
from, when Neutron is enabled for networking
|
||||
:param volumes_per_node: number of Cinder volumes that will be
|
||||
attached to every cluster node
|
||||
:param volumes_size: size of each Cinder volume in GB
|
||||
:param auto_security_group: boolean value. If set to True Sahara will
|
||||
create a Security Group for each Node Group
|
||||
in the Cluster automatically.
|
||||
:param security_groups: list of security groups that will be used
|
||||
while creating VMs. If auto_security_group is
|
||||
set to True, this list can be left empty.
|
||||
:param node_configs: configs dict that will be passed to each Node
|
||||
Group
|
||||
:param cluster_configs: configs dict that will be passed to the
|
||||
Cluster
|
||||
:param wait_active: Wait until a Cluster gets int "Active" state
|
||||
:return: The created cluster
|
||||
:returns: created cluster
|
||||
"""
|
||||
|
||||
node_groups = [
|
||||
{
|
||||
"name": "master-ng",
|
||||
@ -313,7 +308,6 @@ class SaharaScenario(base.Scenario):
|
||||
There two specific scaling methods of up and down scaling which have
|
||||
different atomic timers.
|
||||
"""
|
||||
|
||||
worker_node_group = [g for g in cluster.node_groups
|
||||
if "worker" in g["name"]][0]
|
||||
scale_object = {
|
||||
@ -330,34 +324,30 @@ class SaharaScenario(base.Scenario):
|
||||
|
||||
@base.atomic_action_timer('sahara.scale_up')
|
||||
def _scale_cluster_up(self, cluster, delta):
|
||||
"""Adds a given number of worker nodes to the cluster.
|
||||
"""Add a given number of worker nodes to the cluster.
|
||||
|
||||
:param cluster: The cluster to be scaled
|
||||
:param delta: The number of workers to be added. (A positive number is
|
||||
expected here)
|
||||
expected here)
|
||||
"""
|
||||
|
||||
self._scale_cluster(cluster, delta)
|
||||
|
||||
@base.atomic_action_timer('sahara.scale_down')
|
||||
def _scale_cluster_down(self, cluster, delta):
|
||||
"""Removes a given number of worker nodes from the cluster.
|
||||
"""Remove a given number of worker nodes from the cluster.
|
||||
|
||||
:param cluster: The cluster to be scaled
|
||||
:param delta: The number of workers to be removed. (A negative number
|
||||
is expected here)
|
||||
is expected here)
|
||||
"""
|
||||
|
||||
self._scale_cluster(cluster, delta)
|
||||
|
||||
@base.atomic_action_timer('sahara.delete_cluster')
|
||||
def _delete_cluster(self, cluster):
|
||||
"""Calls a Cluster delete by id and waits for complete deletion.
|
||||
"""Delete cluster.
|
||||
|
||||
:param cluster: The Cluster to be deleted
|
||||
:return:
|
||||
:param cluster: cluster to delete
|
||||
"""
|
||||
|
||||
self.clients("sahara").clusters.delete(cluster.id)
|
||||
|
||||
bench_utils.wait_for(resource=cluster.id,
|
||||
@ -371,11 +361,10 @@ class SaharaScenario(base.Scenario):
|
||||
return True
|
||||
|
||||
def _create_output_ds(self):
|
||||
"""Creates an output Data Source based on EDP context
|
||||
"""Create an output Data Source based on EDP context
|
||||
|
||||
:return: The created Data Source
|
||||
"""
|
||||
|
||||
ds_type = self.context["sahara_output_conf"]["output_type"]
|
||||
url_prefix = self.context["sahara_output_conf"]["output_url_prefix"]
|
||||
|
||||
@ -394,7 +383,7 @@ class SaharaScenario(base.Scenario):
|
||||
|
||||
def _run_job_execution(self, job_id, cluster_id, input_id, output_id,
|
||||
configs, job_idx):
|
||||
"""Runs a Job Execution and waits until it completes or fails.
|
||||
"""Run a Job Execution and wait until it completes or fails.
|
||||
|
||||
The Job Execution is accepted as successful when Oozie reports
|
||||
"success" or "succeeded" status. The failure statuses are "failed" and
|
||||
@ -409,11 +398,10 @@ class SaharaScenario(base.Scenario):
|
||||
:param input_id: The input Data Source id
|
||||
:param output_id: The output Data Source id
|
||||
:param configs: The config dict that will be passed as Job Execution's
|
||||
parameters.
|
||||
parameters.
|
||||
:param job_idx: The index of a job in a sequence
|
||||
|
||||
"""
|
||||
|
||||
@base.atomic_action_timer('sahara.job_execution_%s' % job_idx)
|
||||
def run(self):
|
||||
job_execution = self.clients("sahara").job_executions.create(
|
||||
|
@ -20,13 +20,14 @@ from rally import consts
|
||||
|
||||
|
||||
class TempestScenario(base.Scenario):
|
||||
"""Benchmark scenarios that launch Tempest tests."""
|
||||
|
||||
@validation.tempest_tests_exists()
|
||||
@validation.required_openstack(admin=True)
|
||||
@base.scenario(context={"tempest": {}})
|
||||
@utils.tempest_log_wrapper
|
||||
def single_test(self, test_name, log_file, tempest_conf=None):
|
||||
"""Launch a single test
|
||||
"""Launch a single Tempest test by its name.
|
||||
|
||||
:param test_name: name of tempest scenario for launching
|
||||
:param log_file: name of file for junitxml results
|
||||
@ -43,7 +44,7 @@ class TempestScenario(base.Scenario):
|
||||
@base.scenario(context={"tempest": {}})
|
||||
@utils.tempest_log_wrapper
|
||||
def all(self, log_file, tempest_conf=None):
|
||||
"""Launch all discovered tests
|
||||
"""Launch all discovered Tempest tests by their names.
|
||||
|
||||
:param log_file: name of file for junitxml results
|
||||
:param tempest_conf: User specified tempest.conf location
|
||||
@ -57,7 +58,7 @@ class TempestScenario(base.Scenario):
|
||||
@base.scenario(context={"tempest": {}})
|
||||
@utils.tempest_log_wrapper
|
||||
def set(self, set_name, log_file, tempest_conf=None):
|
||||
"""Launch one by one methods from the set
|
||||
"""Launch all Tempest tests from a given set.
|
||||
|
||||
:param set_name: set name of tempest scenarios for launching
|
||||
:param log_file: name of file for junitxml results
|
||||
@ -79,7 +80,7 @@ class TempestScenario(base.Scenario):
|
||||
@base.scenario(context={"tempest": {}})
|
||||
@utils.tempest_log_wrapper
|
||||
def list_of_tests(self, test_names, log_file, tempest_conf=None):
|
||||
"""Launch all tests from given list
|
||||
"""Launch all Tempest tests from a given list of their names.
|
||||
|
||||
:param test_names: list of tempest scenarios for launching
|
||||
:param log_file: name of file for junitxml results
|
||||
@ -93,8 +94,9 @@ class TempestScenario(base.Scenario):
|
||||
@base.scenario(context={"tempest": {}})
|
||||
@utils.tempest_log_wrapper
|
||||
def specific_regex(self, regex, log_file, tempest_conf=None):
|
||||
"""Launch all tests which match given regex
|
||||
"""Launch Tempest tests whose names match a given regular expression.
|
||||
|
||||
:param regex: regexp to match Tempest test names against
|
||||
:param log_file: name of file for junitxml results
|
||||
:param tempest_conf: User specified tempest.conf location
|
||||
"""
|
||||
|
@ -23,12 +23,18 @@ from rally import sshutils
|
||||
|
||||
|
||||
class VMScenario(base.Scenario):
|
||||
"""Base class for VM scenarios with basic atomic actions.
|
||||
|
||||
VM scenarios are scenarios executed inside some launched VM instance.
|
||||
"""
|
||||
|
||||
@base.atomic_action_timer('vm.run_command')
|
||||
def run_action(self, ssh, interpreter, script):
|
||||
"""Run command inside an instance.
|
||||
|
||||
This is a separate function so that only script execution is timed
|
||||
This is a separate function so that only script execution is timed.
|
||||
|
||||
:returns: tuple (exit_status, stdout, stderr)
|
||||
"""
|
||||
return ssh.execute(interpreter, stdin=open(script, "rb"))
|
||||
|
||||
@ -64,7 +70,7 @@ class VMScenario(base.Scenario):
|
||||
"""Check if a server is attached to the specified network.
|
||||
|
||||
:param server: The server object to consider
|
||||
:param network: The name of the network to search for.
|
||||
:param network: The name of the network to search for
|
||||
|
||||
:raises: `ValueError` if server is not attached to network.
|
||||
"""
|
||||
|
@ -27,6 +27,7 @@ from rally import exceptions
|
||||
|
||||
class VMTasks(nova_utils.NovaScenario, vm_utils.VMScenario,
|
||||
cinder_utils.CinderScenario):
|
||||
"""Benchmark scenarios that are to be run inside VM instances."""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(VMTasks, self).__init__(*args, **kwargs)
|
||||
@ -51,12 +52,14 @@ class VMTasks(nova_utils.NovaScenario, vm_utils.VMScenario,
|
||||
use_floatingip=True,
|
||||
force_delete=False,
|
||||
**kwargs):
|
||||
"""Boot server, run a script that outputs JSON, delete server.
|
||||
"""Boot a server, run a script that outputs JSON, delete the server.
|
||||
|
||||
Example Script in doc/samples/tasks/support/instance_dd_test.sh
|
||||
|
||||
:param image: glance image name to use for the vm
|
||||
:param flavor: VM flavor name
|
||||
:param script: script to run on the server, must output JSON mapping
|
||||
metric names to values. See sample script below.
|
||||
metric names to values (see the sample script below)
|
||||
:param interpreter: The shell interpreter to use when running script
|
||||
:param username: User to SSH to instance as
|
||||
:param volume_args: volume args when boot VM from volume
|
||||
@ -65,15 +68,12 @@ class VMTasks(nova_utils.NovaScenario, vm_utils.VMScenario,
|
||||
:param ip_version: Version of ip protocol to use for connection
|
||||
:param port: Port to use for SSH connection
|
||||
:param use_floatingip: Whether to associate a floating ip for
|
||||
connection
|
||||
connection
|
||||
:param force_delete: Whether to use force_delete for instances
|
||||
|
||||
:returns: Dictionary containing two keys, data and errors. Data is JSON
|
||||
data output by the script. Errors is raw data from the
|
||||
script's standard error stream.
|
||||
|
||||
|
||||
Example Script in doc/samples/tasks/support/instance_dd_test.sh
|
||||
data output by the script. Errors is raw data from the
|
||||
script's standard error stream.
|
||||
"""
|
||||
if volume_args:
|
||||
volume = self._create_volume(volume_args['size'], imageRef=None)
|
||||
|
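A hedged task sketch for the scenario documented above; the scenario name, image, flavor, interpreter and username are assumptions, while the script path is the sample referenced in the docstring:

VMTasks.boot_runcommand_delete:
  -
    args:
      flavor:
        name: "m1.tiny"
      image:
        name: "^cirros.*uec$"
      script: "doc/samples/tasks/support/instance_dd_test.sh"
      interpreter: "/bin/sh"
      username: "cirros"
    runner:
      type: "constant"
      times: 2
      concurrency: 1
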
@ -20,38 +20,38 @@ from rally.benchmark import validation
|
||||
|
||||
|
||||
class ZaqarBasic(zutils.ZaqarScenario):
|
||||
"""Benchmark scenarios for Zaqar."""
|
||||
|
||||
@validation.number("name_length", minval=10)
|
||||
@base.scenario(context={"cleanup": ["zaqar"]})
|
||||
def create_queue(self, name_length=10, **kwargs):
|
||||
"""Creates Zaqar queue with random name
|
||||
"""Create a Zaqar queue with a random name.
|
||||
|
||||
:param name_length: length of generated (random) part of name
|
||||
:param kwargs: other optional parameters to create queues like
|
||||
"metadata"
|
||||
"""
|
||||
|
||||
self._queue_create(name_length=name_length, **kwargs)
|
||||
|
||||
@validation.number("name_length", minval=10)
|
||||
@base.scenario(context={"cleanup": ["zaqar"]})
|
||||
def producer_consumer(self, name_length=10,
|
||||
min_msg_count=50, max_msg_count=200, **kwargs):
|
||||
"""Serial producer/consumer
|
||||
"""Serial message producer/consumer.
|
||||
|
||||
Creates a Zaqar queue with random name, sends a set of messages
|
||||
and then retrieves an iterator containing those
|
||||
and then retrieves an iterator containing those.
|
||||
|
||||
:param name_length: length of generated (random) part of name
|
||||
:param min_msg_count: min number of messages to be posted
|
||||
:param max_msg_count: max number of messages to be posted
|
||||
:param kwargs: other optional parameters to create queues like
|
||||
'metadata'
|
||||
"metadata"
|
||||
"""
|
||||
|
||||
queue = self._queue_create(name_length=name_length, **kwargs)
|
||||
msg_count = random.randint(min_msg_count, max_msg_count)
|
||||
messages = [{'body': {'id': idx}, 'ttl': 360} for idx
|
||||
messages = [{"body": {"id": idx}, "ttl": 360} for idx
|
||||
in range(msg_count)]
|
||||
self._messages_post(queue, messages, min_msg_count, max_msg_count)
|
||||
self._messages_list(queue)
|
||||
|
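The producer/consumer scenario above can be exercised with a task entry such as this sketch; the argument values simply restate the documented defaults, while the runner settings are illustrative:

ZaqarBasic.producer_consumer:
  -
    args:
      name_length: 10
      min_msg_count: 50
      max_msg_count: 200
    runner:
      type: "constant"
      times: 10
      concurrency: 2
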
@ -16,23 +16,23 @@ from rally.benchmark.scenarios import base


class ZaqarScenario(base.Scenario):
    """Base class for Zaqar scenarios with basic atomic actions."""

    @base.atomic_action_timer('zaqar.create_queue')
    def _queue_create(self, name_length=10, **kwargs):
        """Creates Zaqar queue with random name
        """Create a Zaqar queue with random name.

        :param name_length: length of generated (random) part of name
        :param **kwargs: other optional parameters to create queues like
                         "metadata"
        :param kwargs: other optional parameters to create queues like
                       "metadata"
        :returns: Zaqar queue instance
        """

        name = self._generate_random_name(length=name_length)
        return self.clients("zaqar").queue(name, **kwargs)

    @base.atomic_action_timer('zaqar.delete_queue')
    def _queue_delete(self, queue):
        """Removes a Zaqar queue
        """Removes a Zaqar queue.

        :param queue: queue to remove
        """
@ -40,10 +40,12 @@ class ZaqarScenario(base.Scenario):
        queue.delete()

    def _messages_post(self, queue, messages, min_msg_count, max_msg_count):
        """Post a list of messages to a given Zaqar queue
        """Post a list of messages to a given Zaqar queue.

        :param queue: post the messages to queue
        :param messages: messages to post
        :param min_msg_count: minimum number of messages
        :param max_msg_count: maximum number of messages
        """
        with base.AtomicAction(self, 'zaqar.post_between_%s_and_%s_messages' %
                               (min_msg_count, max_msg_count)):
@ -51,10 +53,10 @@ class ZaqarScenario(base.Scenario):

    @base.atomic_action_timer('zaqar.list_messages')
    def _messages_list(self, queue):
        """Gets messages from a given Zaqar queue
        """Gets messages from a given Zaqar queue.

        :param queue: get messages from queue
        :return: messages iterator
        :returns: messages iterator
        """

        return queue.messages()
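Side note on the helpers above: the decorator and the context-manager forms of atomic actions are equivalent; _messages_post uses base.AtomicAction directly only because its action name is built from the min/max message counts at runtime. A minimal sketch of both forms, with purely illustrative class and action names:

from rally.benchmark.scenarios import base


class IllustrationScenario(base.Scenario):
    """Illustration only: two ways of recording atomic actions."""

    @base.atomic_action_timer("illustration.static_action")
    def _static_action(self):
        pass  # the timer name is fixed at definition time

    def _dynamic_action(self, count):
        # Context-manager form, handy when the action name is computed at runtime.
        with base.AtomicAction(self, "illustration.batch_of_%s_items" % count):
            for _ in range(count):
                pass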
@ -92,7 +92,6 @@ class SSH(object):
        :param pkey: RSA or DSS private key string or file object
        :param key_filename: private key filename
        :param password: password

        """
        self.user = user
@ -234,8 +233,7 @@ class SSH(object):
        :param stdin: Open file to be sent on process stdin.
        :param timeout: Timeout for execution of the command.

        Return tuple (exit_status, stdout, stderr)

        :returns: tuple (exit_status, stdout, stderr)
        """
        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
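To make the documented return value concrete, a typical call looks roughly like this; the import path, positional constructor arguments and host/credential values are assumptions based on the parameters documented above, not a verbatim signature:

from rally import sshutils

# Hypothetical host and credentials, purely for illustration.
ssh = sshutils.SSH("ubuntu", "192.0.2.10", key_filename="/path/to/private_key")
exit_status, stdout, stderr = ssh.execute("uname -a", timeout=60)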
@ -382,12 +382,12 @@ def distance(s1, s2):


def retry(times, func, *args, **kwargs):
    """Tries to execute multiple times function mitigating exceptions.
    """Try to execute multiple times function mitigating exceptions.

    :param times: Amount of attempts to execute function
    :param func: Function that should be executed
    :param *args: *args that are passed to func
    :param **kwargs: **kwargs that are passed to func
    :param args: *args that are passed to func
    :param kwargs: **kwargs that are passed to func

    :raises: Raise any exception that can raise func
    :returns: Result of func(*args, **kwargs)
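A short usage sketch of the signature documented above; flaky_call is a hypothetical function used only to exercise retry():

from rally import utils


def flaky_call(url, _attempts=[0]):
    # Fails on the first call and succeeds afterwards.
    _attempts[0] += 1
    if _attempts[0] < 2:
        raise RuntimeError("transient failure")
    return "fetched %s" % url


# Up to 3 attempts; positional and keyword arguments are forwarded to flaky_call.
result = utils.retry(3, flaky_call, "http://example.com")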
@ -40,8 +40,7 @@ class NovaServersTestCase(test.TestCase):
        scenario._delete_server = mock.MagicMock()

        scenario.boot_and_bounce_server("img", 1, actions=actions)
        scenario._boot_server.assert_called_once_with("name", "img", 1,
                                                      actions=actions)
        scenario._boot_server.assert_called_once_with("name", "img", 1)
        server_calls = []
        for i in range(5):
            server_calls.append(mock.call(fake_server))
@ -66,8 +65,7 @@ class NovaServersTestCase(test.TestCase):

        scenario.boot_and_bounce_server("img", 1, actions=actions)

        scenario._boot_server.assert_called_once_with("name", "img", 1,
                                                      actions=actions)
        scenario._boot_server.assert_called_once_with("name", "img", 1)
        server_calls = []
        for i in range(5):
            server_calls.append(mock.call(fake_server))
@ -92,8 +90,7 @@ class NovaServersTestCase(test.TestCase):
        scenario._generate_random_name = mock.MagicMock(return_value='name')

        scenario.boot_and_bounce_server("img", 1, actions=actions)
        scenario._boot_server.assert_called_once_with("name", "img", 1,
                                                      actions=actions)
        scenario._boot_server.assert_called_once_with("name", "img", 1)
        server_calls = []
        for i in range(5):
            server_calls.append(mock.call(fake_server))
@ -146,8 +143,7 @@ class NovaServersTestCase(test.TestCase):

        scenario.boot_and_bounce_server("img", 1, actions=actions)

        scenario._boot_server.assert_called_once_with("name", "img", 1,
                                                      actions=actions)
        scenario._boot_server.assert_called_once_with("name", "img", 1)
        server_calls = []
        for i in range(5):
            server_calls.append(mock.call(fake_server))
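These assertion updates lean on the fact that mock's assert_called_once_with checks the exact call signature, so dropping the actions keyword from the expectation mirrors the refactored boot_and_bounce_server, which no longer forwards it to _boot_server. A minimal standalone illustration (the action list content is arbitrary):

import mock

boot_server = mock.MagicMock()
boot_server("name", "img", 1)

boot_server.assert_called_once_with("name", "img", 1)  # passes
try:
    boot_server.assert_called_once_with("name", "img", 1,
                                        actions=[{"hard_reboot": 5}])
except AssertionError:
    print("an extra keyword argument makes the assertion fail")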
@ -21,6 +21,7 @@ from tests.unit import test


class TestCriterion(base.SLA):
    """Test SLA."""
    OPTION_NAME = "test_criterion"
    CONFIG_SCHEMA = {"type": "integer"}
@ -35,14 +35,26 @@ class ProviderMixIn(object):


class ProviderA(ProviderMixIn, ProviderFactory):
    """Fake server provider.

    Used for tests.
    """
    pass


class ProviderB(ProviderMixIn, ProviderFactory):
    """Fake server provider.

    Used for tests.
    """
    pass


class ProviderC(ProviderB):
    """Fake server provider.

    Used for tests.
    """
    pass
@ -56,6 +56,10 @@ class FakeDeployment(object):


class FakeEngine(deploy.EngineFactory):
    """Fake deployment engine.

    Used for tests.
    """
    deployed = False
    cleanuped = False

@ -80,14 +84,26 @@ class EngineMixIn(object):


class EngineFake1(EngineMixIn, deploy.EngineFactory):
    """Fake deployment engine.

    Used for tests.
    """
    pass


class EngineFake2(EngineMixIn, deploy.EngineFactory):
    """Fake deployment engine.

    Used for tests.
    """
    pass


class EngineFake3(EngineFake2):
    """Fake deployment engine.

    Used for tests.
    """
    pass
@ -1288,6 +1288,7 @@ class FakeRunner(object):


class FakeScenario(base.Scenario):
    """Fake Scenario class."""

    def idle_time(self):
        return 0
tests/unit/test_docstrings.py (new file, 102 lines)
@ -0,0 +1,102 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from rally.benchmark.scenarios import base
from rally.benchmark.sla import base as sla_base
from rally import deploy
from rally.deploy import serverprovider
from rally import utils
from tests.unit import test


class DocstringsTestCase(test.TestCase):

    def test_all_scenarios_have_docstrings(self):
        ignored_params = ["self", "scenario_obj"]
        for scenario_group in utils.itersubclasses(base.Scenario):
            for method in dir(scenario_group):
                if base.Scenario.is_scenario(scenario_group, method):
                    scenario = getattr(scenario_group, method)
                    scenario_name = scenario_group.__name__ + "." + method
                    self.assertIsNotNone(scenario.__doc__,
                                         "%s doesn't have a docstring." %
                                         scenario_name)
                    doc = utils.parse_docstring(scenario.__doc__)
                    short_description = doc["short_description"]
                    self.assertIsNotNone(short_description,
                                         "Docstring for %s should have "
                                         "at least a one-line description." %
                                         scenario_name)
                    self.assertFalse(short_description.startswith("Test"),
                                     "One-line description for %s "
                                     "should be declarative and not start "
                                     "with 'Test(s) ...'" % scenario_name)
                    params_count = scenario.func_code.co_argcount
                    params = scenario.func_code.co_varnames[:params_count]
                    documented_params = [p["name"] for p in doc["params"]]
                    for param in params:
                        if param not in ignored_params:
                            self.assertIn(param, documented_params,
                                          "Docstring for %(scenario)s should "
                                          "describe the '%(param)s' parameter "
                                          "in the :param <name>: clause." %
                                          {"scenario": scenario_name,
                                           "param": param})

    def test_all_scenario_groups_have_docstrings(self):
        for scenario_group in utils.itersubclasses(base.Scenario):
            scenario_group_name = scenario_group.__name__
            self.assertIsNotNone(scenario_group.__doc__,
                                 "%s doesn't have a class-level docstring." %
                                 scenario_group_name)
            doc = utils.parse_docstring(scenario_group.__doc__)
            msg = ("Docstring for %s should have a one-line description." %
                   scenario_group_name)
            self.assertIsNotNone(doc["short_description"], msg)

    def test_all_deploy_engines_have_docstrings(self):
        for deploy_engine in utils.itersubclasses(deploy.EngineFactory):
            deploy_engine_name = deploy_engine.__name__
            self.assertIsNotNone(deploy_engine.__doc__,
                                 "%s doesn't have a class-level docstring." %
                                 deploy_engine_name)
            doc = utils.parse_docstring(deploy_engine.__doc__)
            msg = ("Docstring for %s should have a one-line description "
                   "and a detailed description." % deploy_engine_name)
            self.assertIsNotNone(doc["short_description"], msg)
            self.assertIsNotNone(doc["long_description"], msg)

    def test_all_server_providers_have_docstrings(self):
        for provider in utils.itersubclasses(serverprovider.ProviderFactory):
            provider_name = provider.__name__
            self.assertIsNotNone(provider.__doc__,
                                 "%s doesn't have a class-level docstring." %
                                 provider_name)
            doc = utils.parse_docstring(provider.__doc__)
            msg = ("Docstring for %s should have a one-line description "
                   "and a detailed description." % provider_name)
            self.assertIsNotNone(doc["short_description"], msg)
            self.assertIsNotNone(doc["long_description"], msg)

    def test_all_SLA_have_docstrings(self):
        for sla in utils.itersubclasses(sla_base.SLA):
            sla_name = sla.OPTION_NAME
            self.assertIsNotNone(sla.__doc__,
                                 "%s doesn't have a class-level docstring." %
                                 sla_name)
            doc = utils.parse_docstring(sla.__doc__)
            self.assertIsNotNone(doc["short_description"],
                                 "Docstring for %s should have a "
                                 "one-line description." % sla_name)