Merge "[CLI] Rework commands.task.TaskCommands.detailed"

Author: Jenkins · 2016-03-10 14:36:25 +00:00 · Committed by: Gerrit Code Review
commit b4253e17d2
4 changed files with 141 additions and 326 deletions

View File

@@ -38,7 +38,6 @@ from rally import consts
 from rally import exceptions
 from rally import plugins
 from rally.task.processing import plot
-from rally.task.processing import utils


 class FailedToLoadTask(exceptions.RallyException):
@@ -296,69 +295,36 @@ class TaskCommands(object):
                    help="Print detailed results for each iteration.")
     @envutils.with_default_task_id
     def detailed(self, task_id=None, iterations_data=False):
-        """Display results table.
-
-        :param task_id: Task uuid
-        :param iterations_data: print detailed results for each iteration
-
-        Prints detailed information of task.
-        """
-
-        def _print_iterations_data(result):
-            raw_data = result["data"]["raw"]
-            headers = ["iteration", "full duration"]
-            float_cols = ["full duration"]
-            atomic_actions = []
-
-            for row in raw_data:
-                # find first non-error result to get atomic actions names
-                if not row["error"] and "atomic_actions" in row:
-                    atomic_actions = row["atomic_actions"].keys()
-
-            for row in raw_data:
-                if row["atomic_actions"]:
-                    for (c, a) in enumerate(atomic_actions, 1):
-                        action = "%(no)i. %(action)s" % {"no": c, "action": a}
-                        headers.append(action)
-                        float_cols.append(action)
-                    break
-
-            table_rows = []
-            formatters = dict(zip(float_cols,
-                                  [cliutils.pretty_float_formatter(col, 3)
-                                   for col in float_cols]))
-            for (c, r) in enumerate(raw_data, 1):
-                dlist = [c]
-                dlist.append(r["duration"])
-                if r["atomic_actions"]:
-                    for action in atomic_actions:
-                        dlist.append(r["atomic_actions"].get(action) or 0)
-                table_rows.append(rutils.Struct(**dict(zip(headers,
-                                                            dlist))))
-            cliutils.print_list(table_rows,
-                                fields=headers,
-                                formatters=formatters)
-            print()
-
-        def _print_task_info(task):
-            print()
-            print("-" * 80)
-            print(_("Task %(task_id)s: %(status)s")
-                  % {"task_id": task_id, "status": task["status"]})
-            if task["status"] == consts.TaskStatus.FAILED:
-                print("-" * 80)
-                verification = yaml.safe_load(task["verification_log"])
-                if not logging.is_debug():
-                    print(verification[0])
-                    print(verification[1])
-                    print()
-                    print(_("For more details run:\n"
-                            "rally -vd task detailed %s")
-                          % task["uuid"])
-                else:
-                    print(yaml.safe_load(verification[2]))
-                return False
-            return True
-
-        def _print_scenario_args(result):
-            key = result["key"]
+        """Print detailed information about given task.
+
+        :param task_id: str, task uuid
+        :param iterations_data: bool, include results for each iteration
+        """
+        task = api.Task.get_detailed(task_id, extended_results=True)
+        if not task:
+            print("The task %s can not be found" % task_id)
+            return 1
+
+        print()
+        print("-" * 80)
+        print(_("Task %(task_id)s: %(status)s")
+              % {"task_id": task_id, "status": task["status"]})
+
+        if task["status"] == consts.TaskStatus.FAILED:
+            print("-" * 80)
+            verification = yaml.safe_load(task["verification_log"])
+            if logging.is_debug():
+                print(yaml.safe_load(verification[2]))
+            else:
+                print(verification[0])
+                print(verification[1])
+                print(_("\nFor more details run:\nrally -vd task detailed %s")
+                      % task["uuid"])
+            return 0
+
+        for result in task["results"]:
+            key = result["key"]
             print("-" * 80)
             print()
@@ -366,145 +332,107 @@ class TaskCommands(object):
             print("args position %s" % key["pos"])
             print("args values:")
             print(json.dumps(key["kw"], indent=2))
-
-        def _print_summrized_result(result):
-            raw = result["data"]["raw"]
-            table_cols = ["action", "min", "median",
-                          "90%ile", "95%ile", "max",
-                          "avg", "success", "count"]
-            float_cols = ["min", "median",
-                          "90%ile", "95%ile", "max",
-                          "avg"]
-            formatters = dict(zip(float_cols,
-                                  [cliutils.pretty_float_formatter(col, 3)
-                                   for col in float_cols]))
-            table_rows = []
-            actions_data = utils.get_atomic_actions_data(raw)
-            for action in actions_data:
-                durations = actions_data[action]
-                if durations:
-                    data = [action,
-                            round(min(durations), 3),
-                            round(utils.median(durations), 3),
-                            round(utils.percentile(durations, 0.90), 3),
-                            round(utils.percentile(durations, 0.95), 3),
-                            round(max(durations), 3),
-                            round(utils.mean(durations), 3),
-                            "%.1f%%" % (len(durations) * 100.0 / len(raw)),
-                            len(raw)]
-                else:
-                    data = [action, None, None, None, None, None, None,
-                            "0.0%", len(raw)]
-                table_rows.append(rutils.Struct(**dict(zip(table_cols,
-                                                           data))))
-            cliutils.print_list(table_rows, fields=table_cols,
-                                formatters=formatters,
-                                table_label="Response Times (sec)",
-                                sortby_index=None)
-
-        def _print_ssrs_result(result):
-            raw = result["data"]["raw"]
-            # NOTE(hughsaunders): ssrs=scenario specific results
-            ssrs = []
-            for itr in raw:
-                if "output" not in itr:
-                    itr["output"] = {"additive": [], "complete": []}
-
-                    # NOTE(amaretskiy): "scenario_output" is supported
-                    #   for backward compatibility
-                    if ("scenario_output" in itr
-                            and itr["scenario_output"]["data"]):
-                        itr["output"]["additive"].append(
-                            {"data": itr["scenario_output"]["data"].items(),
-                             "title": "Scenario output",
-                             "description": "",
-                             "chart_plugin": "StackedArea"})
-                    del itr["scenario_output"]
-                for idx, additive in enumerate(itr["output"]["additive"]):
-                    try:
-                        for key, value in additive["data"]:
-                            ssrs[idx]["data"][key].append(value)
-                    except IndexError:
-                        data = {}
-                        keys = []
-                        for key, value in additive["data"]:
-                            if key not in data:
-                                data[key] = []
-                                keys.append(key)
-                            data[key].append(value)
-                        ssrs.append({"title": additive["title"],
-                                     "keys": keys,
-                                     "data": data})
-            if not ssrs:
-                return
-
-            print("\nScenario Specific Results\n")
-
-            headers = ["key", "min", "median", "90%ile", "95%ile",
-                       "max", "avg"]
-            float_cols = ["min", "median", "90%ile", "95%ile", "max", "avg"]
-            formatters = dict(zip(float_cols,
-                                  [cliutils.pretty_float_formatter(col, 3)
-                                   for col in float_cols]))
-
-            for ssr in ssrs:
-                rows = []
-                for key in ssr["keys"]:
-                    values = ssr["data"][key]
-
-                    if values:
-                        row = [str(key),
-                               round(min(values), 3),
-                               round(utils.median(values), 3),
-                               round(utils.percentile(values, 0.90), 3),
-                               round(utils.percentile(values, 0.95), 3),
-                               round(max(values), 3),
-                               round(utils.mean(values), 3)]
-                    else:
-                        row = [str(key)] + ["n/a"] * 6
-                    rows.append(rutils.Struct(**dict(zip(headers, row))))
-                cliutils.print_list(rows,
-                                    fields=headers,
-                                    formatters=formatters,
-                                    table_label=ssr["title"])
-                print()
-
-        def _print_hints(task):
-            print()
-            print("HINTS:")
-            print(_("* To plot HTML graphics with this data, run:"))
-            print("\trally task report %s --out output.html" % task["uuid"])
-            print()
-            print(_("* To generate a JUnit report, run:"))
-            print("\trally task report %s --junit --out output.xml" %
-                  task["uuid"])
-            print()
-            print(_("* To get raw JSON output of task results, run:"))
-            print("\trally task results %s\n" % task["uuid"])
-
-        task = api.Task.get_detailed(task_id)
-        if task is None:
-            print("The task %s can not be found" % task_id)
-            return(1)
-
-        if _print_task_info(task):
-            for result in task["results"]:
-                _print_scenario_args(result)
-                _print_summrized_result(result)
-                if iterations_data:
-                    _print_iterations_data(result)
-                _print_ssrs_result(result)
-                print(_("Load duration: %s") %
-                      result["data"]["load_duration"])
-                print(_("Full duration: %s") %
-                      result["data"]["full_duration"])
-            _print_hints(task)
+            print()
+
+            durations = plot.charts.MainStatsTable(result["info"])
+            iterations = []
+            iterations_headers = ["iteration", "full duration"]
+            iterations_actions = []
+            output = []
+
+            if iterations_data:
+                for i, atomic_name in enumerate(result["info"]["atomic"], 1):
+                    action = "%i. %s" % (i, atomic_name)
+                    iterations_headers.append(action)
+                    iterations_actions.append((atomic_name, action))
+
+            for idx, itr in enumerate(result["iterations"], 1):
+                durations.add_iteration(itr)
+
+                if iterations_data:
+                    row = {"iteration": idx,
+                           "full duration": itr["duration"]}
+                    for name, action in iterations_actions:
+                        row[action] = itr["atomic_actions"].get(name, 0)
+                    iterations.append(row)
+
+                if "output" in itr:
+                    iteration_output = itr["output"]
+                else:
+                    iteration_output = {"additive": [], "complete": []}
+
+                    # NOTE(amaretskiy): "scenario_output" is supported
+                    #   for backward compatibility
+                    if ("scenario_output" in itr
+                            and itr["scenario_output"]["data"]):
+                        iteration_output["additive"].append(
+                            {"data": itr["scenario_output"]["data"].items(),
+                             "title": "Scenario output",
+                             "description": "",
+                             "chart_plugin": "StackedArea"})
+
+                for idx, additive in enumerate(iteration_output["additive"]):
+                    if len(output) <= idx + 1:
+                        output_table = plot.charts.OutputStatsTable(
+                            result["info"], title=additive["title"])
+                        output.append(output_table)
+                    output[idx].add_iteration(additive["data"])
+
+            cols = plot.charts.MainStatsTable.columns
+            float_cols = cols[1:7]
+            formatters = dict(zip(float_cols,
+                                  [cliutils.pretty_float_formatter(col, 3)
+                                   for col in float_cols]))
+            rows = [dict(zip(cols, r)) for r in durations.render()["rows"]]
+            cliutils.print_list(rows,
+                                fields=cols,
+                                formatters=formatters,
+                                table_label="Response Times (sec)",
+                                sortby_index=None)
+            print()
+
+            if iterations_data:
+                formatters = dict(zip(iterations_headers[1:],
+                                      [cliutils.pretty_float_formatter(col, 3)
+                                       for col in iterations_headers[1:]]))
+                cliutils.print_list(iterations,
+                                    fields=iterations_headers,
+                                    table_label="Atomics per iteration",
+                                    formatters=formatters)
+                print()
+
+            if output:
+                cols = plot.charts.OutputStatsTable.columns
+                float_cols = cols[1:7]
+                formatters = dict(zip(float_cols,
+                                      [cliutils.pretty_float_formatter(col, 3)
+                                       for col in float_cols]))
+
+                for out in output:
+                    data = out.render()
+                    rows = [dict(zip(cols, r)) for r in data["data"]["rows"]]
+                    if rows:
+                        # NOTE(amaretskiy): print title explicitly because
+                        #   prettytable fails if title length is too long
+                        print(data["title"])
+                        cliutils.print_list(rows, fields=cols,
+                                            formatters=formatters)
+                        print()
+
+            print(_("Load duration: %s") %
+                  result["info"]["load_duration"])
+            print(_("Full duration: %s") %
+                  result["info"]["full_duration"])
+
+        print("\nHINTS:")
+        print(_("* To plot HTML graphics with this data, run:"))
+        print("\trally task report %s --out output.html\n" % task["uuid"])
+        print(_("* To generate a JUnit report, run:"))
+        print("\trally task report %s --junit --out output.xml\n" %
+              task["uuid"])
+        print(_("* To get raw JSON output of task results, run:"))
+        print("\trally task results %s\n" % task["uuid"])

     @cliutils.args("--uuid", type=str, dest="task_id", help="UUID of task.")
     @envutils.with_default_task_id
     @cliutils.suppress_warnings
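The core of the rework above is that the CLI no longer computes statistics by hand: every iteration is pushed into plot.charts.MainStatsTable and the rendered rows become the "Response Times (sec)" table. A minimal sketch of that flow, assuming a Rally tree that contains this change; the result dict below is illustrative (shaped like the fixture in the updated unit test further down), not real task output:

    from rally.task.processing import plot

    # Illustrative workload result, mirroring the test fixture's shape.
    result = {
        "info": {"atomic": {"foo": {}, "bar": {}},
                 "iterations_count": 2,
                 "load_duration": 3.2,
                 "full_duration": 3.5},
        "iterations": [
            {"duration": 0.9, "idle_duration": 0.1, "error": None,
             "atomic_actions": {"foo": 0.6, "bar": 0.3},
             "output": {"additive": [], "complete": []}},
            {"duration": 1.2, "idle_duration": 0.3, "error": None,
             "atomic_actions": {"foo": 0.7, "bar": 0.5},
             "output": {"additive": [], "complete": []}},
        ],
    }

    # Feed each iteration into the stats table, then render summary rows
    # (one per atomic action plus a total), as the new detailed() does.
    durations = plot.charts.MainStatsTable(result["info"])
    for itr in result["iterations"]:
        durations.add_iteration(itr)

    cols = plot.charts.MainStatsTable.columns
    for row in durations.render()["rows"]:
        print(dict(zip(cols, row)))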

View File

@@ -15,44 +15,9 @@

 import math

-from rally.common import costilius
-from rally.common.i18n import _
-from rally import exceptions
-
-
-def mean(values):
-    """Find the simple mean of a list of values.
-
-    :parameter values: non-empty list of numbers
-    :returns: float value
-    """
-    if not values:
-        raise exceptions.InvalidArgumentsException(
-            "the list should be non-empty")
-    return math.fsum(values) / len(values)
-
-
-def median(values):
-    """Find the simple median of a list of values.
-
-    :parameter values: non-empty list of numbers
-    :returns: float value
-    """
-    if not values:
-        raise ValueError(_("no median for empty data"))
-
-    values = sorted(values)
-    size = len(values)
-
-    if size % 2 == 1:
-        return values[size // 2]
-    else:
-        index = size // 2
-        return (values[index - 1] + values[index]) / 2.0
-

+# NOTE(amaretskiy): this is used only by rally.common.streaming_algorithms
+#                   so it is reasonable to move it there
 def percentile(values, percent):
     """Find the percentile of a list of values.

@@ -74,32 +39,6 @@ def percentile(values, percent):
     return (d0 + d1)


-# TODO(amaretskiy): This function is deprecated and should be removed
-#                   after it becomes not used by rally.cli.commands.task
-def get_atomic_actions_data(raw_data):
-    """Retrieve detailed (by atomic actions & total runtime) benchmark data.
-
-    :parameter raw_data: list of raw records (scenario runner output)
-    :returns: dictionary containing atomic action + total duration lists
-              for all atomic action keys
-    """
-    atomic_actions = []
-    for row in raw_data:
-        # find first non-error result to get atomic actions names
-        if not row["error"] and "atomic_actions" in row:
-            atomic_actions = row["atomic_actions"].keys()
-            break
-    actions_data = costilius.OrderedDict()
-    for atomic_action in atomic_actions:
-        actions_data[atomic_action] = [
-            r["atomic_actions"][atomic_action]
-            for r in raw_data
-            if r["atomic_actions"].get(atomic_action) is not None]
-    actions_data["total"] = [r["duration"] for r in raw_data if not r["error"]]
-    return actions_data
-
-
 class GraphZipper(object):

     def __init__(self, base_size, zipped_size=1000):

View File

@@ -262,7 +262,7 @@ class TaskCommandsTestCase(test.TestCase):
     @mock.patch("rally.cli.commands.task.api.Task")
     def test_detailed(self, mock_task):
         test_uuid = "c0d874d4-7195-4fd5-8688-abe82bfad36f"
-        value = {
+        mock_task.get_detailed.return_value = {
            "id": "task",
            "uuid": test_uuid,
            "status": "status",
@@ -273,64 +273,46 @@ class TaskCommandsTestCase(test.TestCase):
                        "pos": "fake_pos",
                        "kw": "fake_kw"
                    },
-                    "data": {
-                        "load_duration": 1.0,
-                        "full_duration": 2.0,
-                        "raw": [
-                            {
-                                "duration": 0.9,
-                                "idle_duration": 0.5,
-                                "scenario_output": {
-                                    "data": {
-                                        "a": 3
-                                    },
-                                    "errors": "some"
-                                },
-                                "atomic_actions": {
-                                    "a": 0.6,
-                                    "b": 0.7
-                                },
-                                "error": ["type", "message", "traceback"]
-                            },
-                            {
-                                "duration": 0.5,
-                                "idle_duration": 0.2,
-                                "scenario_output": {
-                                    "data": {
-                                        "a": 1
-                                    },
-                                    "errors": "some"
-                                },
-                                "atomic_actions": {
-                                    "a": 0.2,
-                                    "b": 0.4
-                                },
-                                "error": None
-                            },
-                            {
-                                "duration": 0.6,
-                                "idle_duration": 0.4,
-                                "scenario_output": {
-                                    "data": {
-                                        "a": 2
-                                    },
-                                    "errors": None
-                                },
-                                "atomic_actions": {
-                                    "a": 0.3,
-                                    "b": 0.5
-                                },
-                                "error": None
-                            }
-                        ]
-                    }
+                    "info": {
+                        "load_duration": 3.2,
+                        "full_duration": 3.5,
+                        "iterations_count": 4,
+                        "atomic": {"foo": {}, "bar": {}}},
+                    "iterations": [
+                        {"duration": 0.9,
+                         "idle_duration": 0.1,
+                         "output": {"additive": [], "complete": []},
+                         "atomic_actions": {"foo": 0.6, "bar": 0.7},
+                         "error": ["type", "message", "traceback"]
+                         },
+                        {"duration": 1.2,
+                         "idle_duration": 0.3,
+                         "output": {"additive": [], "complete": []},
+                         "atomic_actions": {"foo": 0.6, "bar": 0.7},
+                         "error": ["type", "message", "traceback"]
+                         },
+                        {"duration": 0.7,
+                         "idle_duration": 0.5,
+                         "scenario_output": {
+                             "data": {"foo": 0.6, "bar": 0.7},
+                             "errors": "some"
+                         },
+                         "atomic_actions": {"foo": 0.6, "bar": 0.7},
+                         "error": ["type", "message", "traceback"]
+                         },
+                        {"duration": 0.5,
+                         "idle_duration": 0.5,
+                         "output": {"additive": [], "complete": []},
+                         "atomic_actions": {"foo": 0.6, "bar": 0.7},
+                         "error": ["type", "message", "traceback"]
+                         }
+                    ]
                }
            ]
        }
-        mock_task.get_detailed = mock.MagicMock(return_value=value)
         self.task.detailed(test_uuid)
-        mock_task.get_detailed.assert_called_once_with(test_uuid)
+        mock_task.get_detailed.assert_called_once_with(test_uuid,
+                                                       extended_results=True)
         self.task.detailed(test_uuid, iterations_data=True)

     @mock.patch("rally.cli.commands.task.api.Task")
@@ -362,7 +344,8 @@ class TaskCommandsTestCase(test.TestCase):
         test_uuid = "eb290c30-38d8-4c8f-bbcc-fc8f74b004ae"
         mock_task.get_detailed = mock.MagicMock(return_value=None)
         self.task.detailed(test_uuid)
-        mock_task.get_detailed.assert_called_once_with(test_uuid)
+        mock_task.get_detailed.assert_called_once_with(test_uuid,
+                                                       extended_results=True)

     @mock.patch("json.dumps")
     @mock.patch("rally.cli.commands.task.api.Task.get")

View File

@@ -15,7 +15,6 @@

 import ddt

-from rally import exceptions
 from rally.task.processing import utils
 from tests.unit import test

@@ -36,40 +35,6 @@ class MathTestCase(test.TestCase):
         result = utils.percentile(lst, 1)
         self.assertEqual(result, 100)

-    def test_mean(self):
-        lst = list(range(1, 100))
-        result = utils.mean(lst)
-        self.assertEqual(result, 50.0)
-
-    def test_mean_empty_list(self):
-        lst = []
-        self.assertRaises(exceptions.InvalidArgumentsException,
-                          utils.mean, lst)
-
-    def test_median_single_value(self):
-        lst = [5]
-        result = utils.median(lst)
-        self.assertEqual(5, result)
-
-    def test_median_odd_sized_list(self):
-        lst = [1, 2, 3, 4, 5]
-        result = utils.median(lst)
-        self.assertEqual(3, result)
-
-    def test_median_even_sized_list(self):
-        lst = [1, 2, 3, 4]
-        result = utils.median(lst)
-        self.assertEqual(2.5, result)
-
-    def test_median_empty_list(self):
-        lst = []
-        self.assertRaises(ValueError,
-                          utils.median, lst)
-
-        lst = None
-        self.assertRaises(ValueError,
-                          utils.median, lst)
-

 @ddt.ddt
 class GraphZipperTestCase(test.TestCase):