Fix pep8 job according to latest set of rules + fix py34

Change-Id: Ie43ca6ce18928fc8a7c41fae916d44909308de70
parent 028d9b3723
commit 7642d345bf
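Most of the hunks below apply one mechanical transformation: multi-line conditions and expressions now break the line before a binary operator (and/or/arithmetic) instead of after it, which is presumably the W503/W504 operator-placement pair whose defaults changed in newer pycodestyle releases; in each reworked statement the old form is shown immediately followed by the new form. A minimal before/after sketch with made-up variables, not taken from the Rally sources:

    status = "ACTIVE"
    progress = 100

    # Old style flagged by the updated pep8 rules: operator left at the end of the line.
    is_ready = (status == "ACTIVE" and
                progress == 100)

    # New style applied throughout this commit: the break comes before the operator.
    is_ready = (status == "ACTIVE"
                and progress == 100)

    print(is_ready)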
@@ -18,9 +18,9 @@ import inspect

from docutils.parsers import rst

from . import utils
from rally.cli import cliutils
from rally.cli import main
import utils


class Parser(object):

@@ -18,11 +18,11 @@ from docutils.parsers import rst
import json
import re

from . import utils
from rally.common.plugin import discover
from rally.common.plugin import plugin
from rally.common import validation
from rally import plugins
import utils


JSON_SCHEMA_TYPES_MAP = {"boolean": "bool",

@@ -33,6 +33,7 @@ def parse_text(text):
parser.parse(text, document)
return document.children


paragraph = lambda text: parse_text(text)[0]
note = lambda msg: nodes.note("", paragraph(msg))
hint = lambda msg: nodes.hint("", *parse_text(msg))

@@ -275,8 +275,8 @@ class _Task(APIGroup):
# declared in jinja2.Environment.globals for both types of undeclared
# variables and successfully renders templates in both cases.
required_kwargs = jinja2.meta.find_undeclared_variables(ast)
missing = (set(required_kwargs) - set(kwargs) - set(dir(builtins)) -
set(env.globals))
missing = (set(required_kwargs) - set(kwargs) - set(dir(builtins))
- set(env.globals))
real_missing = [mis for mis in missing
if is_really_missing(mis, task_template)]
if real_missing:

@@ -745,8 +745,8 @@ class _Verifier(APIGroup):
verifier, verifier.status, consts.VerifierStatus.INSTALLED)
)

system_wide_in_use = (system_wide or
(system_wide is None and verifier.system_wide))
system_wide_in_use = (
system_wide or (system_wide is None and verifier.system_wide))
if update_venv and system_wide_in_use:
raise exceptions.RallyException(
"It is impossible to update the virtual environment for "

@@ -195,8 +195,8 @@ def print_dict(obj, fields=None, formatters=None, mixed_case_fields=False,
fields = sorted(obj.keys())
else:
fields = [name for name in dir(obj)
if (not name.startswith("_") and
not callable(getattr(obj, name)))]
if (not name.startswith("_")
and not callable(getattr(obj, name)))]

pt = prettytable.PrettyTable([property_label, value_label], caching=False)
pt.align = "l"

@@ -222,9 +222,8 @@ def print_dict(obj, fields=None, formatters=None, mixed_case_fields=False,
data = textwrap.fill(six.text_type(data), wrap)
# if value has a newline, add in multiple rows
# e.g. fault with stacktrace
if (data and
isinstance(data, six.string_types) and
(r"\n" in data or "\r" in data)):
if (data and isinstance(data, six.string_types)
and (r"\n" in data or "\r" in data)):
# "\r" would break the table, so remove it.
if "\r" in data:
data = data.replace("\r", "")

@@ -729,7 +728,7 @@ _rally()
local cur="${COMP_WORDS[COMP_CWORD]}"
local prev="${COMP_WORDS[COMP_CWORD-1]}"

if [[ $cur =~ ^(\.|\~|\/) ]] || [[ $prev =~ ^--out(|put-file)$ ]] ; then
if [[ $cur =~ ^(\\.|\\~|\\/) ]] || [[ $prev =~ ^--out(|put-file)$ ]] ; then
_rally_filedir
elif [[ $prev =~ ^--(task|filename)$ ]] ; then
_rally_filedir "\\.json|\\.yaml|\\.yml"
@@ -237,17 +237,17 @@ class DeploymentCommands(object):

info = api.deployment.check(deployment=deployment)
for platform in info:
for i, credentials in enumerate(info[platform]):
for i, creds in enumerate(info[platform]):
failed = False

n = "" if len(info[platform]) == 1 else " #%s" % (i + 1)
header = "Platform %s%s:" % (platform, n)
print(cliutils.make_header(header))
if "admin_error" in credentials:
print_error("admin", credentials["admin_error"])
if "admin_error" in creds:
print_error("admin", creds["admin_error"])
failed = True
if "user_error" in credentials:
print_error("users", credentials["user_error"])
if "user_error" in creds:
print_error("users", creds["user_error"])
failed = True

if not failed:

@@ -256,19 +256,19 @@ class DeploymentCommands(object):
"Service": lambda x: x.get("name"),
"Service Type": lambda x: x.get("type"),
"Status": lambda x: x.get("status", "Available")}
if (is_field_there(credentials["services"], "type") and
is_field_there(credentials["services"], "name")):
if (is_field_there(creds["services"], "type")
and is_field_there(creds["services"], "name")):
headers = ["Service", "Service Type", "Status"]
else:
headers = ["Service", "Status"]

if is_field_there(credentials["services"], "version"):
if is_field_there(creds["services"], "version"):
headers.append("Version")

if is_field_there(credentials["services"], "description"):
if is_field_there(creds["services"], "description"):
headers.append("Description")

cliutils.print_list(credentials["services"], headers,
cliutils.print_list(creds["services"], headers,
normalize_field_names=True,
formatters=formatters)
else:

@@ -630,10 +630,9 @@ class VerifyCommands(object):
formatters = {
"Started at": lambda v: v["created_at"].replace("T", " "),
"Finished at": lambda v: v["updated_at"].replace("T", " "),
"Duration": lambda v: (dt.datetime.strptime(v["updated_at"],
TIME_FORMAT) -
dt.datetime.strptime(v["created_at"],
TIME_FORMAT)),
"Duration": lambda v: (
dt.datetime.strptime(v["updated_at"], TIME_FORMAT)
- dt.datetime.strptime(v["created_at"], TIME_FORMAT)),
"Run arguments": run_args_formatter,
"Tags": lambda v: ", ".join(v["tags"]) or None,
"Verifier name": lambda v: "%s (UUID: %s)" % (verifier["name"],

@@ -700,10 +699,9 @@ class VerifyCommands(object):
deployment=v["deployment_uuid"])["name"]),
"Started at": lambda v: v["created_at"],
"Finished at": lambda v: v["updated_at"],
"Duration": lambda v: (dt.datetime.strptime(v["updated_at"],
TIME_FORMAT) -
dt.datetime.strptime(v["created_at"],
TIME_FORMAT))
"Duration": lambda v:
(dt.datetime.strptime(v["updated_at"], TIME_FORMAT)
- dt.datetime.strptime(v["created_at"], TIME_FORMAT))
}
cliutils.print_list(verifications, fields, formatters=formatters,
normalize_field_names=True, sortby_index=4)
@@ -137,13 +137,13 @@ def upgrade():
# validation failed
op.execute(
task.update().where(
(task.c.status == op.inline_literal("failed")) &
(task.c.validation_result == {})).values(
(task.c.status == op.inline_literal("failed"))
& (task.c.validation_result == {})).values(
{"new_status": "crashed", "validation_result": {}}))
op.execute(
task.update().where(
(task.c.status == op.inline_literal("failed")) &
(task.c.validation_result != {})).values(
(task.c.status == op.inline_literal("failed"))
& (task.c.validation_result != {})).values(
{"new_status": "validation_failed",
"validation_result": task.c.validation_result}))

@@ -54,8 +54,8 @@ def _check_user_entry(user):
keys = set(user.keys())
if keys == {"username", "password", "tenant_name",
"project_domain_name", "user_domain_name"}:
if (user["user_domain_name"] == "" and
user["project_domain_name"] == ""):
if (user["user_domain_name"] == ""
and user["project_domain_name"] == ""):
# it is credentials of keystone v2 and they were created
# --fromenv
del user["user_domain_name"]

@@ -29,8 +29,8 @@ def prepare_input_args(func):
if not test_id:
return

if (test_id.startswith("setUpClass (") or
test_id.startswith("tearDown (")):
if (test_id.startswith("setUpClass (")
or test_id.startswith("tearDown (")):
test_id = test_id[test_id.find("(") + 1:-1]

tags = _parse_test_tags(test_id)

@@ -81,8 +81,8 @@ class SubunitV2StreamResult(object):
return test_id.split("[")[0] if test_id.find("[") > -1 else test_id

def _check_expected_failure(self, test_id):
if (test_id in self._expected_failures or
self._get_test_name(test_id) in self._expected_failures):
if (test_id in self._expected_failures
or self._get_test_name(test_id) in self._expected_failures):
if self._tests[test_id]["status"] == "fail":
self._tests[test_id]["status"] = "xfail"
if self._expected_failures[test_id]:

@@ -113,9 +113,9 @@ class SubunitV2StreamResult(object):
# failed, there is only one event with reason and status. So we should
# modify all tests of test class manually.
for test_id in self._unknown_entities:
known_test_ids = filter(lambda t:
t == test_id or t.startswith(
"%s." % test_id), self._tests)
known_test_ids = filter(
lambda t: t == test_id or t.startswith("%s." % test_id),
self._tests)
for t_id in known_test_ids:
if self._tests[t_id]["status"] == "init":
self._tests[t_id]["status"] = (

@@ -76,8 +76,8 @@ class Verification(object):
self._update(status=status)

def finish(self, totals, tests):
if (totals.get("failures", 0) == 0 and
totals.get("unexpected_success", 0) == 0):
if (totals.get("failures", 0) == 0
and totals.get("unexpected_success", 0) == 0):
status = consts.VerificationStatus.FINISHED
else:
status = consts.VerificationStatus.FAILED

@@ -102,9 +102,9 @@ class StdDevComputation(StreamingAlgorithm):
self.mean = self.mean_computation.result()
self.count += other.count

self.dev_sum = (dev_sum1 + count1 * mean1 ** 2 +
dev_sum2 + count2 * mean2 ** 2 -
self.count * self.mean ** 2)
self.dev_sum = (dev_sum1 + count1 * mean1 ** 2
+ dev_sum2 + count2 * mean2 ** 2
- self.count * self.mean ** 2)

def result(self):
# NOTE(amaretskiy): Need at least two values to be processed
@@ -265,11 +265,11 @@ class ElasticSearchExporter(exporter.TaskExporter):
if itr["error"] and (
# the case when it is a top level of the scenario and the
# first fails the item which is not wrapped by AtomicTimer
(not _parent and not atomic_actions) or
(not _parent and not atomic_actions)
# the case when it is a top level of the scenario and and
# the item fails after some atomic actions completed
(not _parent and atomic_actions and
not atomic_actions[-1].get("failed", False))):
or (not _parent and atomic_actions
and not atomic_actions[-1].get("failed", False))):
act_id = act_id_tmpl % {
"itr_id": itr["id"],
"action_name": "no-name-action",

@@ -278,8 +278,8 @@ class ElasticSearchExporter(exporter.TaskExporter):
# Since the action had not be wrapped by AtomicTimer, we cannot
# make any assumption about it's duration (start_time) so let's use
# finished_at timestamp of iteration with 0 duration
timestamp = (itr["timestamp"] + itr["duration"] +
itr["idle_duration"])
timestamp = (itr["timestamp"] + itr["duration"]
+ itr["idle_duration"])
timestamp = dt.datetime.utcfromtimestamp(timestamp)
timestamp = timestamp.strftime(consts.TimeFormat.ISO8601)
action_report = self._make_action_report(

@@ -61,9 +61,9 @@ class PeriodicTrigger(hook.HookTrigger):
return self.config["unit"]

def on_event(self, event_type, value=None):
if not (event_type == self.get_listening_event() and
self.config["start"] <= value <= self.config["end"] and
(value - self.config["start"]) % self.config["step"] == 0):
if not (event_type == self.get_listening_event()
and self.config["start"] <= value <= self.config["end"]
and (value - self.config["start"]) % self.config["step"] == 0):
# do nothing
return
super(PeriodicTrigger, self).on_event(event_type, value)

@@ -240,8 +240,8 @@ class RPSScenarioRunner(runner.ScenarioRunner):
return float(rps_cfg) / number_of_processes
stage_order = (time.time() - start_timer) / rps_cfg.get(
"duration", 1) - 1
rps = (float(rps_cfg["start"] + rps_cfg["step"] * stage_order) /
number_of_processes)
rps = (float(rps_cfg["start"] + rps_cfg["step"] * stage_order)
/ number_of_processes)

return min(rps, float(rps_cfg["end"]))

@@ -69,8 +69,8 @@ class Outliers(sla.SLA):
self.iterations += 1

# NOTE(msdubov): First check if the current iteration is an outlier
if ((self.iterations >= self.min_iterations and self.threshold and
duration > self.threshold)):
if (self.iterations >= self.min_iterations
and self.threshold and duration > self.threshold):
self.outliers += 1

# NOTE(msdubov): Then update the threshold value

@@ -105,9 +105,9 @@ class ResultConsumer(object):
self.load_finished_at = max(r["duration"] + r["timestamp"],
self.load_finished_at)
success = self.sla_checker.add_iteration(r)
if (self.abort_on_sla_failure and
not success and
not task_aborted):
if (self.abort_on_sla_failure
and not success
and not task_aborted):
self.sla_checker.set_aborted_on_sla()
self.runner.abort()
self.task.update_status(
@@ -305,8 +305,8 @@ class Trends(object):
"config": workload_cfg}

self._data[key]["tasks"].append(task_uuid)
if (self._data[key]["description"] and
self._data[key]["description"] != w_description):
if (self._data[key]["description"]
and self._data[key]["description"] != w_description):
self._data[key]["description"] = None

self._data[key]["sla_failures"] += not workload["pass_sla"]

@@ -45,8 +45,8 @@ class GraphZipper(object):
order = self.point_order - int(self.compression_ratio / 2.0)

value = (
sum(p[0] * p[1] for p in self.ratio_value_points) /
self.compression_ratio
sum(p[0] * p[1] for p in self.ratio_value_points)
/ self.compression_ratio
)

return [order, value]

@@ -142,9 +142,9 @@ class ServiceMeta(type):
# properties of parents
not_implemented_apis = set()
for name, obj in inspect.getmembers(cls):
if (getattr(obj, "require_impl", False) and
if (getattr(obj, "require_impl", False)
# name in namespace means that object was introduced in cls
name not in namespaces):
and name not in namespaces):
# it is not overridden...
not_implemented_apis.add(name)

@@ -307,8 +307,8 @@ class UnifiedService(Service):
# find all classes with unified implementation
impls = {cls: cls._meta_get("impl")
for cls in discover.itersubclasses(self.__class__)
if (cls._meta_is_inited(raise_exc=False) and
cls._meta_get("impl"))}
if (cls._meta_is_inited(raise_exc=False)
and cls._meta_get("impl"))}

service_names = {o._meta_get("name") for o in impls.values()}

@@ -319,8 +319,8 @@ class UnifiedService(Service):
enabled_services = list(self._clients.services().values())

for cls, impl in impls.items():
if (enabled_services is not None and
impl._meta_get("name") not in enabled_services):
if (enabled_services is not None
and impl._meta_get("name") not in enabled_services):
continue
if cls.is_applicable(self._clients):
return cls, impls

@@ -356,8 +356,8 @@ class _Resource(object):

def __eq__(self, other):
self_id = getattr(self, self._id_property)
return (isinstance(other, self.__class__) and
self_id == getattr(other, self._id_property))
return (isinstance(other, self.__class__)
and self_id == getattr(other, self._id_property))

def _as_dict(self):
return dict((k, self[k]) for k in self.__slots__)

@@ -46,8 +46,9 @@ def get_status(resource, status_attr="status"):
return status.upper()

# Dict case
if ((isinstance(resource, dict) and status_attr in resource.keys() and
isinstance(resource[status_attr], six.string_types))):
if (isinstance(resource, dict)
and status_attr in resource.keys()
and isinstance(resource[status_attr], six.string_types)):
return resource[status_attr].upper()

return "NONE"
@@ -17,6 +17,10 @@ import sys
import six


def _get_default_encoding():
return sys.stdin.encoding or sys.getdefaultencoding()


def safe_decode(text, incoming=None, errors="strict"):
"""Decodes incoming string using `incoming` if they're not already unicode.

@@ -34,8 +38,7 @@ def safe_decode(text, incoming=None, errors="strict"):
return text

if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
incoming = _get_default_encoding()

try:
return text.decode(incoming, errors)

@@ -75,8 +78,7 @@ def safe_encode(text, incoming=None, encoding="utf-8", errors="strict"):
raise TypeError("%s can't be encoded" % type(text))

if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
incoming = _get_default_encoding()

# Avoid case issues in comparisons
if hasattr(incoming, "lower"):

@@ -163,8 +163,8 @@ class VerifierManager(plugin.Plugin):
raise exceptions.ValidationError(
"'pattern' argument should be a string.")
if "concurrency" in args:
if (not isinstance(args["concurrency"], int) or
args["concurrency"] < 0):
if (not isinstance(args["concurrency"], int)
or args["concurrency"] < 0):
raise exceptions.ValidationError(
"'concurrency' argument should be a positive integer or "
"zero.")
@@ -48,15 +48,27 @@
become: True
become_user: root
shell:
executable: /bin/sh
executable: /bin/bash
cmd: |
set -e
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
python{{ python_version.stdout }} get-pip.py
if [ "{{ python_version.stdout }}" == "3.4" ]; then
python{{ python_version.stdout }} get-pip.py --no-setuptools
pip{{ python_version.stdout }} install setuptools==43.0.0
else
python{{ python_version.stdout }} get-pip.py
fi

- name: Install python tox
become: True
become_user: root
command: pip{{ python_version.stdout }} install tox
shell:
executable: /bin/bash
cmd: |
if [ "{{ python_version.stdout }}" == "3.4" ]; then
pip{{ python_version.stdout }} install more-itertools==7.2.0 importlib-metadata==1.1.3 tox
else
pip{{ python_version.stdout }} install tox
fi
roles:
- bindep
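The playbook hunk above pins setuptools, more-itertools and importlib-metadata only on Python 3.4 nodes, presumably because newer releases of those packages dropped 3.4 support; the exact version numbers are taken from the diff itself, not independently verified. A rough, hypothetical Python sketch of the same branching (the helper name is made up, it is not part of the repository):

    import sys

    def tox_install_packages():
        # Python 3.4 gets pinned packaging tools, newer interpreters take the defaults.
        if sys.version_info[:2] == (3, 4):
            return ["setuptools==43.0.0", "more-itertools==7.2.0",
                    "importlib-metadata==1.1.3", "tox"]
        return ["tox"]

    print(" ".join(tox_install_packages()))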
@@ -115,5 +115,6 @@ def main(args):
if exit_code == 1:
error("")


if __name__ == "__main__":
sys.exit(main(sys.argv))

@@ -120,8 +120,8 @@ class PYPIPackage(object):
return self._license

def __eq__(self, other):
return (isinstance(other, PYPIPackage) and
self.package_name == other.package_name)
return (isinstance(other, PYPIPackage)
and self.package_name == other.package_name)


class Requirement(PYPIPackage):

@@ -180,9 +180,11 @@ class Requirement(PYPIPackage):

min_equal_to_max = False
if self.version["min"] and self.version["max"]:
if (self.version["min"].startswith(">=") and
self.version["max"].startswith("<=") and
self.version["min"][2:] == self.version["max"][2:]):
if (
self.version["min"].startswith(">=")
and self.version["max"].startswith("<=")
and self.version["min"][2:] == self.version["max"][2:]
):
# min and max versions are equal there is no need to write
# both of them
min_equal_to_max = True

@@ -220,8 +222,8 @@ class Requirement(PYPIPackage):
return string

def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.package_name == other.package_name)
return (isinstance(other, self.__class__)
and self.package_name == other.package_name)

def __ne__(self, other):
return not self.__eq__(other)

@@ -273,15 +275,15 @@ def parse_data(raw_data, include_comments=True, dependency_cls=Requirement):
requirements[-1].finish_him()
requirements.append(Comment(finished=True))
else:
if (isinstance(requirements[-1], Comment) and
not requirements[-1].is_finished):
if (isinstance(requirements[-1], Comment)
and not requirements[-1].is_finished):
requirements[-1].finish_him()

# parse_line
dep = dependency_cls.parse_line(line)
if dep:
if (isinstance(requirements[-1], Comment) and
DO_NOT_TOUCH_TAG in str(requirements[-1])):
if (isinstance(requirements[-1], Comment)
and DO_NOT_TOUCH_TAG in str(requirements[-1])):
dep.do_not_touch = True
requirements.append(dep)

@@ -52,6 +52,7 @@ def use_keystone_v3():
"--name", "V3", "--file", cfg_file.name])
print(subprocess.check_output(["rally", "deployment", "check"]))


TAG_HANDLERS = {"v3": use_keystone_v3}
@@ -236,14 +236,14 @@ class TaskTestCase(testtools.TestCase):
with open(json_report, "w+") as f:
f.write(rally("task results", no_logs=True))
import_print = rally("task import --file %s" % json_report)
task_uuid = re.search("UUID:\s([a-z0-9\-]+)", import_print).group(1)
task_uuid = re.search(r"UUID:\s([a-z0-9\-]+)", import_print).group(1)
self.assertIn("Dummy.dummy_random_fail_in_atomic",
rally("task results --uuid %s" % task_uuid))

# new json report
rally("task report --json --out %s" % json_report, no_logs=True)
import_print = rally("task import --file %s" % json_report)
task_uuid = re.search("UUID:\s([a-z0-9\-]+)", import_print).group(1)
task_uuid = re.search(r"UUID:\s([a-z0-9\-]+)", import_print).group(1)
self.assertIn("Dummy.dummy_random_fail_in_atomic",
rally("task report --uuid %s --json" % task_uuid))

@@ -1453,7 +1453,7 @@ class HookTestCase(testtools.TestCase):
with open(json_report, "w+") as f:
f.write(rally("task results", no_logs=True))
import_print = rally("task import --file %s" % json_report)
task_uuid = re.search("UUID:\s([a-z0-9\-]+)", import_print).group(1)
task_uuid = re.search(r"UUID:\s([a-z0-9\-]+)", import_print).group(1)
results = rally("task results --uuid %s" % task_uuid)
self.assertIn("Dummy.dummy", results)
self.assertIn("event_hook", results)
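The test changes above only switch the regex literals to raw strings; presumably because backslash sequences such as \s in ordinary string literals trigger invalid-escape warnings (and the corresponding pep8/W605-style check) on modern Python 3. A small self-contained illustration with a made-up sample string, not taken from the Rally tests:

    import re

    import_print = "Task imported. UUID: 8d3f1c2a-0000-4e6b-9d55-aaaaaaaaaaaa"

    # With the raw string the regex engine receives \s and \- untouched and
    # no deprecation warning is emitted when the literal is compiled.
    task_uuid = re.search(r"UUID:\s([a-z0-9\-]+)", import_print).group(1)
    print(task_uuid)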
@@ -244,8 +244,8 @@ def assert_equal_none(logical_line, physical_line, filename):

N322
"""
res = (re_assert_equal_start_with_none.search(logical_line) or
re_assert_equal_end_with_none.search(logical_line))
res = (re_assert_equal_start_with_none.search(logical_line)
or re_assert_equal_end_with_none.search(logical_line))
if res:
yield (0, "N322 assertEqual(A, None) or assertEqual(None, A) "
"sentences not allowed, you should use assertIsNone(A) "

@@ -262,8 +262,9 @@ def assert_true_or_false_with_in(logical_line, physical_line, filename):

N323
"""
res = (re_assert_true_false_with_in_or_not_in.search(logical_line) or
re_assert_true_false_with_in_or_not_in_spaces.search(logical_line))
res = (re_assert_true_false_with_in_or_not_in.search(logical_line)
or re_assert_true_false_with_in_or_not_in_spaces.search(
logical_line))
if res:
yield (0, "N323 assertTrue/assertFalse(A in/not in B)sentences not "
"allowed, you should use assertIn(A, B) or assertNotIn(A, B)"

@@ -280,8 +281,8 @@ def assert_equal_in(logical_line, physical_line, filename):

N324
"""
res = (re_assert_equal_in_end_with_true_or_false.search(logical_line) or
re_assert_equal_in_start_with_true_or_false.search(logical_line))
res = (re_assert_equal_in_end_with_true_or_false.search(logical_line)
or re_assert_equal_in_start_with_true_or_false.search(logical_line))
if res:
yield (0, "N324: Use assertIn/NotIn(A, B) rather than "
"assertEqual(A in/not in B, True/False) when checking "

@@ -294,8 +295,8 @@ def assert_not_equal_none(logical_line, physical_line, filename):

N325
"""
res = (re_assert_not_equal_start_with_none.search(logical_line) or
re_assert_not_equal_end_with_none.search(logical_line))
res = (re_assert_not_equal_start_with_none.search(logical_line)
or re_assert_not_equal_end_with_none.search(logical_line))
if res:
yield (0, "N325 assertNotEqual(A, None) or assertNotEqual(None, A) "
"sentences not allowed, you should use assertIsNotNone(A) "

@@ -311,8 +312,8 @@ def assert_equal_true_or_false(logical_line, physical_line, filename):

N326
"""
res = (re_assert_equal_end_with_true_or_false.search(logical_line) or
re_assert_equal_start_with_true_or_false.search(logical_line))
res = (re_assert_equal_end_with_true_or_false.search(logical_line)
or re_assert_equal_start_with_true_or_false.search(logical_line))
if res:
yield (0, "N326 assertEqual(A, True/False) or "
"assertEqual(True/False, A) sentences not allowed,"

@@ -371,8 +372,8 @@ def check_quotes(logical_line, physical_line, filename):

check_tripple = (
lambda line, i, char: (
i + 2 < len(line) and
(char == line[i] == line[i + 1] == line[i + 2])
i + 2 < len(line)
and (char == line[i] == line[i + 1] == line[i + 2])
)
)

@@ -434,9 +435,9 @@ def check_dict_formatting_in_string(logical_line, tokens):
# NOTE(stpierre): Can't use @skip_ignored_lines here because it's
# a stupid decorator that only works on functions that take
# (logical_line, filename) as arguments.
if (not logical_line or
logical_line.startswith("#") or
logical_line.endswith("# noqa")):
if (not logical_line
or logical_line.startswith("#")
or logical_line.endswith("# noqa")):
return

current_string = ""
@@ -153,8 +153,9 @@ class TaskSampleTestCase(test.TestCase):
bad_filenames = []
for dirname, dirnames, filenames in os.walk(self.samples_path):
for filename in filenames:
if "_" in filename and (filename.endswith(".yaml") or
filename.endswith(".json")):
if "_" in filename and (
filename.endswith(".yaml")
or filename.endswith(".json")):
full_path = os.path.join(dirname, filename)
bad_filenames.append(full_path)

@@ -144,7 +144,7 @@ class MigrationTestCase(rtest.DBTestCase,

def include_object(self, object_, name, type_, reflected, compare_to):
if type_ == "table" and name == "alembic_version":
return False
return False

return super(MigrationTestCase, self).include_object(
object_, name, type_, reflected, compare_to)

@@ -641,8 +641,9 @@ class MigrationWalkTestCase(rtest.DBTestCase,

subtasks_found = conn.execute(
subtask_table.select().
where(subtask_table.c.task_uuid ==
self._e654a0648db0_task_uuid)
where(
subtask_table.c.task_uuid == self._e654a0648db0_task_uuid
)
).fetchall()

self.assertEqual(1, len(subtasks_found))

@@ -673,8 +674,8 @@ class MigrationWalkTestCase(rtest.DBTestCase,

workloads_found = conn.execute(
workload_table.select().
where(workload_table.c.task_uuid ==
self._e654a0648db0_task_uuid)
where(
workload_table.c.task_uuid == self._e654a0648db0_task_uuid)
).fetchall()

self.assertEqual(1, len(workloads_found))

@@ -714,8 +715,8 @@ class MigrationWalkTestCase(rtest.DBTestCase,

workloaddata_found = conn.execute(
workloaddata_table.select().
where(workloaddata_table.c.task_uuid ==
self._e654a0648db0_task_uuid)
where(workloaddata_table.c.task_uuid
== self._e654a0648db0_task_uuid)
).fetchall()

self.assertEqual(1, len(workloaddata_found))

@@ -754,19 +755,19 @@ class MigrationWalkTestCase(rtest.DBTestCase,

conn.execute(
workloaddata_table.delete().
where(workloaddata_table.c.task_uuid ==
self._e654a0648db0_task_uuid)
where(workloaddata_table.c.task_uuid
== self._e654a0648db0_task_uuid)
)

conn.execute(
workload_table.delete().
where(workload_table.c.task_uuid ==
self._e654a0648db0_task_uuid)
where(
workload_table.c.task_uuid == self._e654a0648db0_task_uuid)
)
conn.execute(
subtask_table.delete().
where(subtask_table.c.task_uuid ==
self._e654a0648db0_task_uuid)
where(
subtask_table.c.task_uuid == self._e654a0648db0_task_uuid)
)

conn.execute(

@@ -776,8 +777,8 @@ class MigrationWalkTestCase(rtest.DBTestCase,

conn.execute(
deployment_table.delete().
where(deployment_table.c.uuid ==
self._e654a0648db0_deployment_uuid)
where(deployment_table.c.uuid
== self._e654a0648db0_deployment_uuid)
)

def _pre_upgrade_6ad4f426f005(self, engine):
@@ -857,55 +858,55 @@ class MigrationWalkTestCase(rtest.DBTestCase,
)

def _pre_upgrade_32fada9b2fde(self, engine):
self._32fada9b2fde_deployments = {
# right config which should not be changed after migration
"should-not-be-changed-1": {
"admin": {"username": "admin",
"password": "passwd",
"project_name": "admin"},
"auth_url": "http://example.com:5000/v3",
"region_name": "RegionOne",
"type": "ExistingCloud"},
# right config which should not be changed after migration
"should-not-be-changed-2": {
"admin": {"username": "admin",
"password": "passwd",
"tenant_name": "admin"},
"users": [{"username": "admin",
"password": "passwd",
"tenant_name": "admin"}],
"auth_url": "http://example.com:5000/v2.0",
"region_name": "RegionOne",
"type": "ExistingCloud"},
# not ExistingCloud config which should not be changed
"should-not-be-changed-3": {
"url": "example.com",
"type": "Something"},
# with `admin_domain_name` field
"with_admin_domain_name": {
"admin": {"username": "admin",
"password": "passwd",
"project_name": "admin",
"admin_domain_name": "admin"},
"auth_url": "http://example.com:5000/v3",
"region_name": "RegionOne",
"type": "ExistingCloud"},
}
deployment_table = db_utils.get_table(engine, "deployments")
self._32fada9b2fde_deployments = {
# right config which should not be changed after migration
"should-not-be-changed-1": {
"admin": {"username": "admin",
"password": "passwd",
"project_name": "admin"},
"auth_url": "http://example.com:5000/v3",
"region_name": "RegionOne",
"type": "ExistingCloud"},
# right config which should not be changed after migration
"should-not-be-changed-2": {
"admin": {"username": "admin",
"password": "passwd",
"tenant_name": "admin"},
"users": [{"username": "admin",
"password": "passwd",
"tenant_name": "admin"}],
"auth_url": "http://example.com:5000/v2.0",
"region_name": "RegionOne",
"type": "ExistingCloud"},
# not ExistingCloud config which should not be changed
"should-not-be-changed-3": {
"url": "example.com",
"type": "Something"},
# with `admin_domain_name` field
"with_admin_domain_name": {
"admin": {"username": "admin",
"password": "passwd",
"project_name": "admin",
"admin_domain_name": "admin"},
"auth_url": "http://example.com:5000/v3",
"region_name": "RegionOne",
"type": "ExistingCloud"},
}
deployment_table = db_utils.get_table(engine, "deployments")

deployment_status = consts.DeployStatus.DEPLOY_FINISHED
with engine.connect() as conn:
for deployment in self._32fada9b2fde_deployments:
conf = json.dumps(
self._32fada9b2fde_deployments[deployment])
conn.execute(
deployment_table.insert(),
[{"uuid": deployment, "name": deployment,
"config": conf,
"enum_deployments_status": deployment_status,
"credentials": six.b(json.dumps([])),
"users": six.b(json.dumps([]))
}])
deployment_status = consts.DeployStatus.DEPLOY_FINISHED
with engine.connect() as conn:
for deployment in self._32fada9b2fde_deployments:
conf = json.dumps(
self._32fada9b2fde_deployments[deployment])
conn.execute(
deployment_table.insert(),
[{"uuid": deployment, "name": deployment,
"config": conf,
"enum_deployments_status": deployment_status,
"credentials": six.b(json.dumps([])),
"users": six.b(json.dumps([]))
}])

def _check_32fada9b2fde(self, engine, data):
self.assertEqual("32fada9b2fde",
@@ -956,75 +957,83 @@ class MigrationWalkTestCase(rtest.DBTestCase,
)

def _pre_upgrade_484cd9413e66(self, engine):
self._484cd9413e66_deployment_uuid = "484cd9413e66-deploy"
self._484cd9413e66_deployment_uuid = "484cd9413e66-deploy"

self._484cd9413e66_verifications = [
{"total": {"time": 1.0,
"failures": 2,
"skipped": 3,
"success": 4,
"errors": 0,
"tests": 2
},
"test_cases": {"test1": {"status": "OK"},
"test2": {"status": "FAIL",
"failure": {"log": "trace"}}},
"set_name": "full"},
{"total": {"time": 2.0,
"failures": 3,
"skipped": 4,
"success": 5,
"unexpected_success": 6,
"expected_failures": 7,
"tests": 2
},
"test_cases": {"test1": {"status": "success"},
"test2": {"status": "failed", ""
"traceback": "trace"}},
"set_name": "smoke"}
]
deployment_table = db_utils.get_table(engine, "deployments")
verifications_table = db_utils.get_table(engine, "verifications")
vresults_table = db_utils.get_table(engine,
"verification_results")
self._484cd9413e66_verifications = [
{
"total": {
"time": 1.0,
"failures": 2,
"skipped": 3,
"success": 4,
"errors": 0,
"tests": 2
},
"test_cases": {
"test1": {"status": "OK"},
"test2": {"status": "FAIL", "failure": {"log": "trace"}}
},
"set_name": "full"
},
{
"total": {
"time": 2.0,
"failures": 3,
"skipped": 4,
"success": 5,
"unexpected_success": 6,
"expected_failures": 7,
"tests": 2
},
"test_cases": {
"test1": {"status": "success"},
"test2": {"status": "failed", "traceback": "trace"}
},
"set_name": "smoke"
}
]
deployment_table = db_utils.get_table(engine, "deployments")
verifications_table = db_utils.get_table(engine, "verifications")
vresults_table = db_utils.get_table(engine,
"verification_results")

deployment_status = consts.DeployStatus.DEPLOY_FINISHED
vstatus = consts.TaskStatus.FINISHED
with engine.connect() as conn:
deployment_status = consts.DeployStatus.DEPLOY_FINISHED
vstatus = consts.TaskStatus.FINISHED
with engine.connect() as conn:
conn.execute(
deployment_table.insert(),
[{"uuid": self._484cd9413e66_deployment_uuid,
"name": self._484cd9413e66_deployment_uuid,
"config": six.b(json.dumps([])),
"enum_deployments_status": deployment_status,
"credentials": six.b(json.dumps([])),
"users": six.b(json.dumps([]))
}])

for i in range(len(self._484cd9413e66_verifications)):
verification = self._484cd9413e66_verifications[i]
vuuid = "uuid-%s" % i
conn.execute(
deployment_table.insert(),
[{"uuid": self._484cd9413e66_deployment_uuid,
"name": self._484cd9413e66_deployment_uuid,
"config": six.b(json.dumps([])),
"enum_deployments_status": deployment_status,
"credentials": six.b(json.dumps([])),
"users": six.b(json.dumps([]))
verifications_table.insert(),
[{"uuid": vuuid,
"deployment_uuid":
self._484cd9413e66_deployment_uuid,
"status": vstatus,
"set_name": verification["set_name"],
"tests": verification["total"]["tests"],
"failures": verification["total"]["failures"],
"time": verification["total"]["time"],
"errors": 0,
}])
data = copy.deepcopy(verification)
data["total"]["test_cases"] = data["test_cases"]
data = data["total"]
conn.execute(
vresults_table.insert(),
[{"uuid": vuuid,
"verification_uuid": vuuid,
"data": json.dumps(data)
}])

for i in range(len(self._484cd9413e66_verifications)):
verification = self._484cd9413e66_verifications[i]
vuuid = "uuid-%s" % i
conn.execute(
verifications_table.insert(),
[{"uuid": vuuid,
"deployment_uuid":
self._484cd9413e66_deployment_uuid,
"status": vstatus,
"set_name": verification["set_name"],
"tests": verification["total"]["tests"],
"failures": verification["total"]["failures"],
"time": verification["total"]["time"],
"errors": 0,
}])
data = copy.deepcopy(verification)
data["total"]["test_cases"] = data["test_cases"]
data = data["total"]
conn.execute(
vresults_table.insert(),
[{"uuid": vuuid,
"verification_uuid": vuuid,
"data": json.dumps(data)
}])

def _check_484cd9413e66(self, engine, data):
self.assertEqual("484cd9413e66",
@@ -1086,80 +1095,80 @@ class MigrationWalkTestCase(rtest.DBTestCase,
deployment_table = db_utils.get_table(engine, "deployments")
conn.execute(
deployment_table.delete().where(
deployment_table.c.uuid ==
self._484cd9413e66_deployment_uuid)
deployment_table.c.uuid
== self._484cd9413e66_deployment_uuid)
)

def _pre_upgrade_37fdbb373e8d(self, engine):
self._37fdbb373e8d_deployment_uuid = "37fdbb373e8d-deployment"
self._37fdbb373e8d_verifier_uuid = "37fdbb373e8d-verifier"
self._37fdbb373e8d_verifications_tests = [
{
"test_1[smoke, negative]": {
"name": "test_1",
"time": 2.32,
"status": "success",
"tags": ["smoke", "negative"]
},
"test_2[smoke, negative]": {
"name": "test_2",
"time": 4.32,
"status": "success",
"tags": ["smoke", "negative"]
}
self._37fdbb373e8d_deployment_uuid = "37fdbb373e8d-deployment"
self._37fdbb373e8d_verifier_uuid = "37fdbb373e8d-verifier"
self._37fdbb373e8d_verifications_tests = [
{
"test_1[smoke, negative]": {
"name": "test_1",
"time": 2.32,
"status": "success",
"tags": ["smoke", "negative"]
},
{
"test_3[smoke, negative]": {
"name": "test_3",
"time": 6.32,
"status": "success",
"tags": ["smoke", "negative"]
},
"test_4[smoke, negative]": {
"name": "test_4",
"time": 8.32,
"status": "success",
"tags": ["smoke", "negative"]
}
"test_2[smoke, negative]": {
"name": "test_2",
"time": 4.32,
"status": "success",
"tags": ["smoke", "negative"]
}
]
},
{
"test_3[smoke, negative]": {
"name": "test_3",
"time": 6.32,
"status": "success",
"tags": ["smoke", "negative"]
},
"test_4[smoke, negative]": {
"name": "test_4",
"time": 8.32,
"status": "success",
"tags": ["smoke", "negative"]
}
}
]

deployment_table = db_utils.get_table(engine, "deployments")
verifiers_table = db_utils.get_table(engine, "verifiers")
verifications_table = db_utils.get_table(engine, "verifications")
deployment_table = db_utils.get_table(engine, "deployments")
verifiers_table = db_utils.get_table(engine, "verifiers")
verifications_table = db_utils.get_table(engine, "verifications")

deployment_status = consts.DeployStatus.DEPLOY_FINISHED
with engine.connect() as conn:
deployment_status = consts.DeployStatus.DEPLOY_FINISHED
with engine.connect() as conn:
conn.execute(
deployment_table.insert(),
[{"uuid": self._37fdbb373e8d_deployment_uuid,
"name": self._37fdbb373e8d_deployment_uuid,
"config": six.b(json.dumps([])),
"enum_deployments_status": deployment_status,
"credentials": six.b(json.dumps([])),
"users": six.b(json.dumps([]))
}])

conn.execute(
verifiers_table.insert(),
[{"uuid": self._37fdbb373e8d_verifier_uuid,
"name": self._37fdbb373e8d_verifier_uuid,
"type": "some-type",
"status": consts.VerifierStatus.INSTALLED
}])

for i in range(len(self._37fdbb373e8d_verifications_tests)):
tests = self._37fdbb373e8d_verifications_tests[i]
conn.execute(
deployment_table.insert(),
[{"uuid": self._37fdbb373e8d_deployment_uuid,
"name": self._37fdbb373e8d_deployment_uuid,
"config": six.b(json.dumps([])),
"enum_deployments_status": deployment_status,
"credentials": six.b(json.dumps([])),
"users": six.b(json.dumps([]))
verifications_table.insert(),
[{"uuid": "verification-uuid-%s" % i,
"deployment_uuid":
self._37fdbb373e8d_deployment_uuid,
"verifier_uuid": self._37fdbb373e8d_verifier_uuid,
"status": consts.VerificationStatus.FINISHED,
"tests": json.dumps(tests)
}])

conn.execute(
verifiers_table.insert(),
[{"uuid": self._37fdbb373e8d_verifier_uuid,
"name": self._37fdbb373e8d_verifier_uuid,
"type": "some-type",
"status": consts.VerifierStatus.INSTALLED
}])

for i in range(len(self._37fdbb373e8d_verifications_tests)):
tests = self._37fdbb373e8d_verifications_tests[i]
conn.execute(
verifications_table.insert(),
[{"uuid": "verification-uuid-%s" % i,
"deployment_uuid":
self._37fdbb373e8d_deployment_uuid,
"verifier_uuid": self._37fdbb373e8d_verifier_uuid,
"status": consts.VerificationStatus.FINISHED,
"tests": json.dumps(tests)
}])

def _check_37fdbb373e8d(self, engine, data):
self.assertEqual("37fdbb373e8d",
db.schema.schema_revision(engine=engine))

@@ -1189,8 +1198,8 @@ class MigrationWalkTestCase(rtest.DBTestCase,
deployment_table = db_utils.get_table(engine, "deployments")
conn.execute(
deployment_table.delete().where(
deployment_table.c.uuid ==
self._37fdbb373e8d_deployment_uuid)
deployment_table.c.uuid
== self._37fdbb373e8d_deployment_uuid)
)

def _pre_upgrade_a6f364988fc2(self, engine):

@@ -1319,8 +1328,8 @@ class MigrationWalkTestCase(rtest.DBTestCase,
deployment_table = db_utils.get_table(engine, "deployments")
conn.execute(
deployment_table.delete().where(
deployment_table.c.uuid ==
self._f33f4610dcda_deployment_uuid)
deployment_table.c.uuid
== self._f33f4610dcda_deployment_uuid)
)

def _pre_upgrade_4ef544102ba7(self, engine):

@@ -1437,8 +1446,8 @@ class MigrationWalkTestCase(rtest.DBTestCase,
deployment_table = db_utils.get_table(engine, "deployments")
conn.execute(
deployment_table.delete().where(
deployment_table.c.uuid ==
self._4ef544102ba7_deployment_uuid)
deployment_table.c.uuid
== self._4ef544102ba7_deployment_uuid)
)

def _pre_upgrade_92aaaa2a6bb3(self, engine):
@@ -225,7 +225,6 @@ class TaskTestCase(test.TestCase):
"soft": True, "status": consts.TaskStatus.INIT
},
{
"soft": True, "status": consts.TaskStatus.VALIDATING,
"soft": True, "status": consts.TaskStatus.ABORTED
},
{

@@ -86,8 +86,8 @@ class StdDevComputationTestCase(test.TestCase):
for value in stream:
std_computation.add(value)
mean = float(sum(stream)) / len(stream)
excepted_std = math.sqrt(sum((x - mean) ** 2 for x in stream) /
(len(stream) - 1))
excepted_std = math.sqrt(
sum((x - mean) ** 2 for x in stream) / (len(stream) - 1))
self.assertEqual(excepted_std, std_computation.result())

def test_merge(self):

@@ -339,8 +339,8 @@ class RandomNameTestCase(test.TestCase):
self.assertFalse(utils.name_matches_object(name, One, Two))
# ensure that exactly one of the two objects is checked
self.assertItemsEqual(
One.name_matches_object.call_args_list +
Two.name_matches_object.call_args_list,
One.name_matches_object.call_args_list
+ Two.name_matches_object.call_args_list,
[mock.call(name)])

def test_name_matches_object_differing_list(self):

@@ -38,7 +38,7 @@ class TestFormat(testtools.TestCase):
if "http://" in line or "https://" in line or ":ref:" in line:
continue
# Allow lines which do not contain any whitespace
if re.match("\s*[^\s]+$", line):
if re.match(r"\s*[^\s]+$", line):
continue
if not text_inside_simple_tables:
self.assertTrue(

@@ -149,8 +149,8 @@ class ConstantScenarioRunnerTestCase(test.TestCase):
@mock.patch(RUNNERS + "constant.multiprocessing.Queue")
@mock.patch(RUNNERS + "constant.multiprocessing.cpu_count")
@mock.patch(RUNNERS + "constant.ConstantScenarioRunner._log_debug_info")
@mock.patch(RUNNERS +
"constant.ConstantScenarioRunner._create_process_pool")
@mock.patch(
RUNNERS + "constant.ConstantScenarioRunner._create_process_pool")
@mock.patch(RUNNERS + "constant.ConstantScenarioRunner._join_processes")
def test_that_cpu_count_is_adjusted_properly(
self,

@@ -301,8 +301,7 @@ class RPSScenarioRunnerTestCase(test.TestCase):
@mock.patch(RUNNERS + "constant.multiprocessing.Queue")
@mock.patch(RUNNERS + "rps.multiprocessing.cpu_count")
@mock.patch(RUNNERS + "rps.RPSScenarioRunner._log_debug_info")
@mock.patch(RUNNERS +
"rps.RPSScenarioRunner._create_process_pool")
@mock.patch(RUNNERS + "rps.RPSScenarioRunner._create_process_pool")
@mock.patch(RUNNERS + "rps.RPSScenarioRunner._join_processes")
def test_that_cpu_count_is_adjusted_properly(
self, mock__join_processes, mock__create_process_pool,

@@ -330,9 +330,9 @@ class TriggerTestCase(test.TestCase):
self.assertEqual(len(right_values),
hook_cls.return_value.run_async.call_count)
hook_status = hook_cls.return_value.result.return_value["status"]
res = [hook_cls.return_value.result.return_value] * len(right_values)
self.assertEqual(
{"config": cfg,
"results": [hook_cls.return_value.result.return_value] *
len(right_values),
"results": res,
"summary": {hook_status: len(right_values)}},
dummy_trigger.get_results())

@@ -84,8 +84,8 @@ class DDTDecoratorCheckerTestCase(test.TestCase):

for dirname, dirnames, filenames in os.walk(self.tests_path):
for filename in filenames:
if not (filename.startswith("test_") and
filename.endswith(".py")):
if not (filename.startswith("test_")
and filename.endswith(".py")):
continue

filename = os.path.relpath(os.path.join(dirname, filename))

@@ -192,8 +192,8 @@ class FuncMockArgsDecoratorsChecker(ast.NodeVisitor):
if funcname == "mock.patch":
decname = self._get_value(decorator.args[0])
elif funcname == "mock.patch.object":
decname = (self._get_name(decorator.args[0]) + "." +
self._get_value(decorator.args[1]))
decname = (self._get_name(decorator.args[0]) + "."
+ self._get_value(decorator.args[1]))
else:
continue

@@ -306,8 +306,8 @@ class MockUsageCheckerTestCase(test.TestCase):

for dirname, dirnames, filenames in os.walk(self.tests_path):
for filename in filenames:
if (not filename.startswith("test_") or
not filename.endswith(".py")):
if (not filename.startswith("test_")
or not filename.endswith(".py")):
continue

filename = os.path.relpath(os.path.join(dirname, filename))
tox.ini

@@ -2,6 +2,7 @@
minversion = 2.0
skipsdist = True
envlist = py35,py34,py27,pep8,samples
ignore_basepython_conflict = true

[testenv]
extras = {env:RALLY_EXTRAS:}

@@ -23,7 +24,7 @@ commands =
find . -type f -name "*.pyc" -delete
python {toxinidir}/tests/ci/pytest_launcher.py tests/unit --posargs={posargs}
distribute = false
basepython = python2.7
basepython = python3
passenv = PYTEST_REPORT http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY

[testenv:pep8]

@@ -18,3 +18,5 @@ Sphinx==1.8.5;python_version=='2.7'
Sphinx==1.8.5;python_version=='3.4'
SQLAlchemy===1.3.10
virtualenv===16.7.7
importlib-metadata==1.1.3;python_version=='3.4'
more-itertools==7.2.0;python_version=='3.4'