Add scoring engines to database and API layers

A Scoring Module needs to expose the list of available scoring engines
through the API and the Watcher CLI. This list is stored in the database
and synchronized by the Decision Engine.

Partially-Implements: blueprint scoring-module
Change-Id: I32168adeaf34fd12a731204c5b58fe68434ad087
APIImpact

parent b2656b92c4, commit 26d84e353e
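For orientation, here is a minimal sketch of how a client could exercise the REST resource added by this change once it is deployed. The endpoint paths and field names come from the controller introduced below; the host, port and token handling are placeholder assumptions, not part of this commit.

# Sketch only: list and fetch scoring engines over the new API
# (assumes the Watcher API on localhost:9322 and a valid token in TOKEN).
import requests

TOKEN = "placeholder-token"          # hypothetical auth token
BASE = "http://localhost:9322/v1"
headers = {"X-Auth-Token": TOKEN}

# Short listing: uuid, name, description, metainfo for each engine.
engines = requests.get(BASE + "/scoring_engines", headers=headers).json()
for se in engines["scoring_engines"]:
    print(se["name"], se["description"])

# Detailed listing, and a single engine looked up by name or uuid.
detail = requests.get(BASE + "/scoring_engines/detail", headers=headers).json()
one = requests.get(BASE + "/scoring_engines/sample-se-123", headers=headers).json()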
@@ -31,6 +31,10 @@
     "goal:get": "rule:default",
     "goal:get_all": "rule:default",

+    "scoring_engine:detail": "rule:default",
+    "scoring_engine:get": "rule:default",
+    "scoring_engine:get_all": "rule:default",
+
     "strategy:detail": "rule:default",
     "strategy:get": "rule:default",
     "strategy:get_all": "rule:default"
@@ -34,6 +34,7 @@ from watcher.api.controllers.v1 import action_plan
 from watcher.api.controllers.v1 import audit
 from watcher.api.controllers.v1 import audit_template
 from watcher.api.controllers.v1 import goal
+from watcher.api.controllers.v1 import scoring_engine
 from watcher.api.controllers.v1 import strategy

@@ -101,6 +102,9 @@ class V1(APIBase):
     action_plans = [link.Link]
     """Links to the action plans resource"""

+    scoring_engines = [link.Link]
+    """Links to the Scoring Engines resource"""
+
     links = [link.Link]
     """Links that point to a specific URL for this version and documentation"""

@@ -147,6 +151,14 @@ class V1(APIBase):
             'action_plans', '',
             bookmark=True)
         ]

+        v1.scoring_engines = [link.Link.make_link(
+            'self', pecan.request.host_url, 'scoring_engines', ''),
+            link.Link.make_link('bookmark',
+                                pecan.request.host_url,
+                                'scoring_engines', '',
+                                bookmark=True)
+            ]
+
         return v1

@@ -158,6 +170,7 @@ class Controller(rest.RestController):
     actions = action.ActionsController()
     action_plans = action_plan.ActionPlansController()
     goals = goal.GoalsController()
+    scoring_engines = scoring_engine.ScoringEngineController()
     strategies = strategy.StrategiesController()

     @wsme_pecan.wsexpose(V1)
@@ -35,7 +35,7 @@ class Collection(base.APIBase):
         """Return whether collection has more items."""
         return len(self.collection) and len(self.collection) == limit

-    def get_next(self, limit, url=None, **kwargs):
+    def get_next(self, limit, url=None, marker_field="uuid", **kwargs):
         """Return a link to the next subset of the collection."""
         if not self.has_next(limit):
             return wtypes.Unset
@@ -44,7 +44,7 @@ class Collection(base.APIBase):
         q_args = ''.join(['%s=%s&' % (key, kwargs[key]) for key in kwargs])
         next_args = '?%(args)slimit=%(limit)d&marker=%(marker)s' % {
             'args': q_args, 'limit': limit,
-            'marker': getattr(self.collection[-1], "uuid")}
+            'marker': getattr(self.collection[-1], marker_field)}

         return link.Link.make_link('next', pecan.request.host_url,
                                    resource_url, next_args).href
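The get_next() change above makes the pagination marker field configurable instead of hard-coding "uuid", so collections can paginate on another attribute. A minimal standalone illustration of the query string this produces follows; it only mirrors the marker logic (the real method also checks has_next() and wraps the result in a link.Link), and the fake class and values are invented for the example.

# Illustration only: the next-page query string when paginating on "name".
def next_args(items, limit, marker_field="uuid", **kwargs):
    q_args = ''.join('%s=%s&' % (key, kwargs[key]) for key in kwargs)
    return '?%(args)slimit=%(limit)d&marker=%(marker)s' % {
        'args': q_args, 'limit': limit,
        'marker': getattr(items[-1], marker_field)}

class FakeScoringEngine(object):
    def __init__(self, name):
        self.name = name

print(next_args([FakeScoringEngine('se-1'), FakeScoringEngine('se-2')],
                limit=2, marker_field='name', sort_key='name'))
# -> ?sort_key=name&limit=2&marker=se-2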
watcher/api/controllers/v1/scoring_engine.py  (new file, 246 lines)
@@ -0,0 +1,246 @@
# -*- encoding: utf-8 -*-
# Copyright 2016 Intel
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
A :ref:`Scoring Engine <scoring_engine_definition>` is an instance of a data
model to which learning data has been applied.

Because there may be multiple algorithms used to build a particular data
model (and therefore a scoring engine), the usage of a scoring engine may
vary. The metainfo field is supposed to contain any information which might
be needed by the user of a given scoring engine.
"""

import pecan
from pecan import rest
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan

from watcher.api.controllers import base
from watcher.api.controllers import link
from watcher.api.controllers.v1 import collection
from watcher.api.controllers.v1 import types
from watcher.api.controllers.v1 import utils as api_utils
from watcher.common import exception
from watcher.common import policy
from watcher import objects

class ScoringEngine(base.APIBase):
    """API representation of a scoring engine.

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of a scoring
    engine.
    """

    uuid = types.uuid
    """Unique UUID of the scoring engine"""

    name = wtypes.text
    """The name of the scoring engine"""

    description = wtypes.text
    """A human-readable description of the scoring engine"""

    metainfo = wtypes.text
    """Metadata associated with the scoring engine"""

    links = wsme.wsattr([link.Link], readonly=True)
    """A list containing a self link and associated action links"""

    def __init__(self, **kwargs):
        super(ScoringEngine, self).__init__()

        self.fields = []
        self.fields.append('uuid')
        self.fields.append('name')
        self.fields.append('description')
        self.fields.append('metainfo')
        setattr(self, 'uuid', kwargs.get('uuid', wtypes.Unset))
        setattr(self, 'name', kwargs.get('name', wtypes.Unset))
        setattr(self, 'description', kwargs.get('description', wtypes.Unset))
        setattr(self, 'metainfo', kwargs.get('metainfo', wtypes.Unset))

    @staticmethod
    def _convert_with_links(se, url, expand=True):
        if not expand:
            se.unset_fields_except(
                ['uuid', 'name', 'description', 'metainfo'])

        se.links = [link.Link.make_link('self', url,
                                        'scoring_engines', se.uuid),
                    link.Link.make_link('bookmark', url,
                                        'scoring_engines', se.uuid,
                                        bookmark=True)]
        return se

    @classmethod
    def convert_with_links(cls, scoring_engine, expand=True):
        scoring_engine = ScoringEngine(**scoring_engine.as_dict())
        return cls._convert_with_links(
            scoring_engine, pecan.request.host_url, expand)

    @classmethod
    def sample(cls, expand=True):
        sample = cls(uuid='81bbd3c7-3b08-4d12-a268-99354dbf7b71',
                     name='sample-se-123',
                     description='Sample Scoring Engine 123 just for testing')
        return cls._convert_with_links(sample, 'http://localhost:9322', expand)

class ScoringEngineCollection(collection.Collection):
    """API representation of a collection of scoring engines."""

    scoring_engines = [ScoringEngine]
    """A list containing scoring engine objects"""

    def __init__(self, **kwargs):
        super(ScoringEngineCollection, self).__init__()
        self._type = 'scoring_engines'

    @staticmethod
    def convert_with_links(scoring_engines, limit, url=None, expand=False,
                           **kwargs):

        collection = ScoringEngineCollection()
        collection.scoring_engines = [ScoringEngine.convert_with_links(
            se, expand) for se in scoring_engines]

        if 'sort_key' in kwargs:
            reverse = False
            if kwargs['sort_key'] == 'name':
                if 'sort_dir' in kwargs:
                    reverse = (kwargs['sort_dir'] == 'desc')
                collection.scoring_engines = sorted(
                    collection.scoring_engines,
                    key=lambda se: se.name,
                    reverse=reverse)

        collection.next = collection.get_next(limit, url=url, **kwargs)
        return collection

    @classmethod
    def sample(cls):
        sample = cls()
        sample.scoring_engines = [ScoringEngine.sample(expand=False)]
        return sample

class ScoringEngineController(rest.RestController):
    """REST controller for Scoring Engines."""

    def __init__(self):
        super(ScoringEngineController, self).__init__()

    from_scoring_engines = False
    """A flag to indicate if the requests to this controller are coming
    from the top-level resource Scoring Engines."""

    _custom_actions = {
        'detail': ['GET'],
    }

    def _get_scoring_engines_collection(self, marker, limit,
                                        sort_key, sort_dir, expand=False,
                                        resource_url=None):

        limit = api_utils.validate_limit(limit)
        api_utils.validate_sort_dir(sort_dir)

        marker_obj = None
        if marker:
            marker_obj = objects.ScoringEngine.get_by_uuid(
                pecan.request.context, marker)

        filters = {}

        sort_db_key = sort_key

        scoring_engines = objects.ScoringEngine.list(
            context=pecan.request.context,
            limit=limit,
            marker=marker_obj,
            sort_key=sort_db_key,
            sort_dir=sort_dir,
            filters=filters)

        return ScoringEngineCollection.convert_with_links(
            scoring_engines,
            limit,
            url=resource_url,
            expand=expand,
            sort_key=sort_key,
            sort_dir=sort_dir)

    @wsme_pecan.wsexpose(ScoringEngineCollection, wtypes.text,
                         int, wtypes.text, wtypes.text)
    def get_all(self, marker=None, limit=None, sort_key='id',
                sort_dir='asc'):
        """Retrieve a list of Scoring Engines.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        """
        context = pecan.request.context
        policy.enforce(context, 'scoring_engine:get_all',
                       action='scoring_engine:get_all')

        return self._get_scoring_engines_collection(
            marker, limit, sort_key, sort_dir)

    @wsme_pecan.wsexpose(ScoringEngineCollection, wtypes.text,
                         int, wtypes.text, wtypes.text)
    def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'):
        """Retrieve a list of Scoring Engines with detail.

        :param marker: pagination marker for large data sets.
        :param limit: maximum number of resources to return in a single result.
        :param sort_key: column to sort results by. Default: id.
        :param sort_dir: direction to sort. "asc" or "desc". Default: asc.
        """
        context = pecan.request.context
        policy.enforce(context, 'scoring_engine:detail',
                       action='scoring_engine:detail')

        parent = pecan.request.path.split('/')[:-1][-1]
        if parent != "scoring_engines":
            raise exception.HTTPNotFound
        expand = True
        resource_url = '/'.join(['scoring_engines', 'detail'])
        return self._get_scoring_engines_collection(
            marker, limit, sort_key, sort_dir, expand, resource_url)

    @wsme_pecan.wsexpose(ScoringEngine, wtypes.text)
    def get_one(self, scoring_engine):
        """Retrieve information about the given Scoring Engine.

        :param scoring_engine: the name or uuid of the Scoring Engine.
        """
        context = pecan.request.context
        policy.enforce(context, 'scoring_engine:get',
                       action='scoring_engine:get')

        if self.from_scoring_engines:
            raise exception.OperationNotPermitted

        rpc_scoring_engine = api_utils.get_resource(
            'ScoringEngine', scoring_engine)

        return ScoringEngine.convert_with_links(rpc_scoring_engine)
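Taken together, the controller above exposes GET /v1/scoring_engines, GET /v1/scoring_engines/detail and GET /v1/scoring_engines/<name or uuid>. The sketch below shows roughly the payload shape a client can expect for a single engine, assembled from the fields and link construction defined above; the concrete values and exact link hrefs are invented for illustration.

# Hypothetical body returned by GET /v1/scoring_engines/<uuid>
# (field names come from the ScoringEngine class above; values are invented).
expected_shape = {
    "uuid": "81bbd3c7-3b08-4d12-a268-99354dbf7b71",
    "name": "sample-se-123",
    "description": "Sample Scoring Engine 123 just for testing",
    "metainfo": "",
    "links": [
        {"href": "http://localhost:9322/v1/scoring_engines/"
                 "81bbd3c7-3b08-4d12-a268-99354dbf7b71",
         "rel": "self"},
        {"href": "http://localhost:9322/scoring_engines/"
                 "81bbd3c7-3b08-4d12-a268-99354dbf7b71",
         "rel": "bookmark"},
    ],
}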
@@ -274,6 +274,14 @@ class EfficacyIndicatorAlreadyExists(Conflict):
     msg_fmt = _("An action with UUID %(uuid)s already exists")


+class ScoringEngineAlreadyExists(Conflict):
+    msg_fmt = _("A scoring engine with UUID %(uuid)s already exists")
+
+
+class ScoringEngineNotFound(ResourceNotFound):
+    msg_fmt = _("ScoringEngine %(scoring_engine)s could not be found")
+
+
 class HTTPNotFound(ResourceNotFound):
     pass
@@ -635,3 +635,83 @@ class BaseConnection(object):
         :raises: :py:class:`~.EfficacyIndicatorNotFound`
         :raises: :py:class:`~.Invalid`
         """
+
+    @abc.abstractmethod
+    def get_scoring_engine_list(
+            self, context, columns=None, filters=None, limit=None,
+            marker=None, sort_key=None, sort_dir=None):
+        """Get specific columns for matching scoring engines.
+
+        Return a list of the specified columns for all scoring engines that
+        match the specified filters.
+
+        :param context: The security context
+        :param columns: List of column names to return.
+                        Defaults to 'id' column when columns == None.
+        :param filters: Filters to apply. Defaults to None.
+        :param limit: Maximum number of scoring engines to return.
+        :param marker: the last item of the previous page; we return the next
+                       result set.
+        :param sort_key: Attribute by which results should be sorted.
+        :param sort_dir: direction in which results should be sorted.
+                         (asc, desc)
+        :returns: A list of tuples of the specified columns.
+        """
+
+    @abc.abstractmethod
+    def create_scoring_engine(self, values):
+        """Create a new scoring engine.
+
+        :param values: A dict containing several items used to identify
+                       and track the scoring engine.
+        :returns: A scoring engine.
+        :raises: :py:class:`~.ScoringEngineAlreadyExists`
+        """
+
+    @abc.abstractmethod
+    def get_scoring_engine_by_id(self, context, scoring_engine_id):
+        """Return a scoring engine by its id.
+
+        :param context: The security context
+        :param scoring_engine_id: The id of a scoring engine.
+        :returns: A scoring engine.
+        :raises: :py:class:`~.ScoringEngineNotFound`
+        """
+
+    @abc.abstractmethod
+    def get_scoring_engine_by_uuid(self, context, scoring_engine_uuid):
+        """Return a scoring engine by its uuid.
+
+        :param context: The security context
+        :param scoring_engine_uuid: The uuid of a scoring engine.
+        :returns: A scoring engine.
+        :raises: :py:class:`~.ScoringEngineNotFound`
+        """
+
+    @abc.abstractmethod
+    def get_scoring_engine_by_name(self, context, scoring_engine_name):
+        """Return a scoring engine by its name.
+
+        :param context: The security context
+        :param scoring_engine_name: The name of a scoring engine.
+        :returns: A scoring engine.
+        :raises: :py:class:`~.ScoringEngineNotFound`
+        """
+
+    @abc.abstractmethod
+    def destroy_scoring_engine(self, scoring_engine_id):
+        """Destroy a scoring engine.
+
+        :param scoring_engine_id: The id of a scoring engine.
+        :raises: :py:class:`~.ScoringEngineNotFound`
+        """
+
+    @abc.abstractmethod
+    def update_scoring_engine(self, scoring_engine_id, values):
+        """Update properties of a scoring engine.
+
+        :param scoring_engine_id: The id of a scoring engine.
+        :returns: A scoring engine.
+        :raises: :py:class:`~.ScoringEngineNotFound`
+        :raises: :py:class:`~.Invalid`
+        """
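The abstract methods above define the CRUD surface a backend has to implement. A minimal sketch of how a caller would exercise them through the configured backend follows; it assumes a working database connection, an existing request `context`, and uses the SQLAlchemy implementation added in the next hunk. Error handling is omitted.

# Sketch only: walking the scoring engine CRUD surface via the active backend.
from watcher.db import api as db_api

conn = db_api.get_instance()
se = conn.create_scoring_engine({'name': 'demo-se',          # placeholder values
                                 'description': 'demo',
                                 'metainfo': 'a=b'})
same = conn.get_scoring_engine_by_name(context, 'demo-se')   # `context` assumed
conn.update_scoring_engine(se.id, {'description': 'updated'})
conn.soft_delete_scoring_engine(se.id)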
@@ -974,3 +974,86 @@ class Connection(api.BaseConnection):
         except exception.ResourceNotFound:
             raise exception.EfficacyIndicatorNotFound(
                 efficacy_indicator=efficacy_indicator_id)
+
+    # ### SCORING ENGINES ### #
+
+    def _add_scoring_engine_filters(self, query, filters):
+        if filters is None:
+            filters = {}
+
+        plain_fields = ['id', 'description']
+
+        return self._add_filters(
+            query=query, model=models.ScoringEngine, filters=filters,
+            plain_fields=plain_fields)
+
+    def get_scoring_engine_list(
+            self, context, columns=None, filters=None, limit=None,
+            marker=None, sort_key=None, sort_dir=None):
+        query = model_query(models.ScoringEngine)
+        query = self._add_scoring_engine_filters(query, filters)
+        if not context.show_deleted:
+            query = query.filter_by(deleted_at=None)
+
+        return _paginate_query(models.ScoringEngine, limit, marker,
+                               sort_key, sort_dir, query)
+
+    def create_scoring_engine(self, values):
+        # ensure defaults are present for new scoring engines
+        if not values.get('uuid'):
+            values['uuid'] = utils.generate_uuid()
+
+        scoring_engine = models.ScoringEngine()
+        scoring_engine.update(values)
+
+        try:
+            scoring_engine.save()
+        except db_exc.DBDuplicateEntry:
+            raise exception.ScoringEngineAlreadyExists(uuid=values['uuid'])
+        return scoring_engine
+
+    def _get_scoring_engine(self, context, fieldname, value):
+        try:
+            return self._get(context, model=models.ScoringEngine,
+                             fieldname=fieldname, value=value)
+        except exception.ResourceNotFound:
+            raise exception.ScoringEngineNotFound(scoring_engine=value)
+
+    def get_scoring_engine_by_id(self, context, scoring_engine_id):
+        return self._get_scoring_engine(
+            context, fieldname="id", value=scoring_engine_id)
+
+    def get_scoring_engine_by_uuid(self, context, scoring_engine_uuid):
+        return self._get_scoring_engine(
+            context, fieldname="uuid", value=scoring_engine_uuid)
+
+    def get_scoring_engine_by_name(self, context, scoring_engine_name):
+        return self._get_scoring_engine(
+            context, fieldname="name", value=scoring_engine_name)
+
+    def destroy_scoring_engine(self, scoring_engine_id):
+        try:
+            return self._destroy(models.ScoringEngine, scoring_engine_id)
+        except exception.ResourceNotFound:
+            raise exception.ScoringEngineNotFound(
+                scoring_engine=scoring_engine_id)
+
+    def update_scoring_engine(self, scoring_engine_id, values):
+        if 'id' in values:
+            raise exception.Invalid(
+                message=_("Cannot overwrite ID for an existing "
+                          "Scoring Engine."))
+
+        try:
+            return self._update(
+                models.ScoringEngine, scoring_engine_id, values)
+        except exception.ResourceNotFound:
+            raise exception.ScoringEngineNotFound(
+                scoring_engine=scoring_engine_id)
+
+    def soft_delete_scoring_engine(self, scoring_engine_id):
+        try:
+            return self._soft_delete(models.ScoringEngine, scoring_engine_id)
+        except exception.ResourceNotFound:
+            raise exception.ScoringEngineNotFound(
+                scoring_engine=scoring_engine_id)
@@ -29,6 +29,7 @@ from sqlalchemy import Integer
 from sqlalchemy import Numeric
 from sqlalchemy import schema
 from sqlalchemy import String
+from sqlalchemy import Text
 from sqlalchemy.types import TypeDecorator, TEXT

 from watcher.common import paths
@@ -230,3 +231,21 @@ class EfficacyIndicator(Base):
     value = Column(Numeric())
     action_plan_id = Column(Integer, ForeignKey('action_plans.id'),
                             nullable=False)
+
+
+class ScoringEngine(Base):
+    """Represents a scoring engine."""
+
+    __tablename__ = 'scoring_engines'
+    __table_args__ = (
+        schema.UniqueConstraint('uuid', name='uniq_scoring_engines0uuid'),
+        table_args()
+    )
+    id = Column(Integer, primary_key=True)
+    uuid = Column(String(36), nullable=False)
+    name = Column(String(63), nullable=False)
+    description = Column(String(255), nullable=True)
+    # Metainfo might contain some additional information about the data model.
+    # The format might vary between different models (e.g. be JSON, XML or
+    # even some custom format), the blob type should cover all scenarios.
+    metainfo = Column(Text, nullable=True)
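Because metainfo is a free-form Text column, any structured data has to be serialized by the caller. One possible way to do that is sketched below with oslo_serialization (which the test code in this change already uses); the payload keys and values are invented for illustration and are not prescribed by this commit.

# Illustration only: storing structured metainfo as a JSON string.
from oslo_serialization import jsonutils

metainfo = jsonutils.dumps({
    "algorithm": "example-regressor",        # invented keys/values
    "features": ["cpu_util", "mem_util"],
    "model_version": 1,
})
# ...stored in ScoringEngine.metainfo, and decoded later with:
restored = jsonutils.loads(metainfo)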
@@ -19,6 +19,7 @@ from watcher.objects import audit
 from watcher.objects import audit_template
 from watcher.objects import efficacy_indicator
 from watcher.objects import goal
+from watcher.objects import scoring_engine
 from watcher.objects import strategy

 Audit = audit.Audit
@@ -26,8 +27,9 @@ AuditTemplate = audit_template.AuditTemplate
 Action = action.Action
 ActionPlan = action_plan.ActionPlan
 Goal = goal.Goal
+ScoringEngine = scoring_engine.ScoringEngine
 Strategy = strategy.Strategy
 EfficacyIndicator = efficacy_indicator.EfficacyIndicator

 __all__ = ("Audit", "AuditTemplate", "Action", "ActionPlan",
-           "Goal", "Strategy", "EfficacyIndicator")
+           "Goal", "ScoringEngine", "Strategy", "EfficacyIndicator")
watcher/objects/scoring_engine.py  (new file, 257 lines)
@@ -0,0 +1,257 @@
# -*- encoding: utf-8 -*-
# Copyright 2016 Intel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
A :ref:`Scoring Engine <scoring_engine_definition>` is an instance of a data
model to which learning data has been applied.

Because there may be multiple algorithms used to build a particular data
model (and therefore a scoring engine), the usage of a scoring engine may
vary. The metainfo field is supposed to contain any information which might
be needed by the user of a given scoring engine.
"""

from watcher.common import exception
from watcher.common import utils
from watcher.db import api as dbapi
from watcher.objects import base
from watcher.objects import utils as obj_utils


class ScoringEngine(base.WatcherObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    dbapi = dbapi.get_instance()

    fields = {
        'id': int,
        'uuid': obj_utils.str_or_none,
        'name': obj_utils.str_or_none,
        'description': obj_utils.str_or_none,
        'metainfo': obj_utils.str_or_none,
    }

    @staticmethod
    def _from_db_object(scoring_engine, db_scoring_engine):
        """Converts a database entity to a formal object."""
        for field in scoring_engine.fields:
            scoring_engine[field] = db_scoring_engine[field]

        scoring_engine.obj_reset_changes()
        return scoring_engine

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return \
            [ScoringEngine._from_db_object(cls(context), obj)
             for obj in db_objects]

    @classmethod
    def get(cls, context, scoring_engine_id):
        """Find a scoring engine based on its id or uuid

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: ScoringEngine(context)
        :param scoring_engine_id: the id or uuid of a scoring_engine.
        :returns: a :class:`ScoringEngine` object.
        """
        if utils.is_int_like(scoring_engine_id):
            return cls.get_by_id(context, scoring_engine_id)
        elif utils.is_uuid_like(scoring_engine_id):
            return cls.get_by_uuid(context, scoring_engine_id)
        else:
            raise exception.InvalidIdentity(identity=scoring_engine_id)

    @classmethod
    def get_by_id(cls, context, scoring_engine_id):
        """Find a scoring engine based on its id

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: ScoringEngine(context)
        :param scoring_engine_id: the id of a scoring_engine.
        :returns: a :class:`ScoringEngine` object.
        """

        db_scoring_engine = cls.dbapi.get_scoring_engine_by_id(
            context,
            scoring_engine_id)
        scoring_engine = ScoringEngine._from_db_object(cls(context),
                                                       db_scoring_engine)
        return scoring_engine

    @classmethod
    def get_by_uuid(cls, context, scoring_engine_uuid):
        """Find a scoring engine based on its uuid

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: ScoringEngine(context)
        :param scoring_engine_uuid: the uuid of a scoring_engine.
        :returns: a :class:`ScoringEngine` object.
        """

        db_scoring_engine = cls.dbapi.get_scoring_engine_by_uuid(
            context,
            scoring_engine_uuid)
        scoring_engine = ScoringEngine._from_db_object(cls(context),
                                                       db_scoring_engine)
        return scoring_engine

    @classmethod
    def get_by_name(cls, context, scoring_engine_name):
        """Find a scoring engine based on its name

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: ScoringEngine(context)
        :param scoring_engine_name: the name of a scoring_engine.
        :returns: a :class:`ScoringEngine` object.
        """

        db_scoring_engine = cls.dbapi.get_scoring_engine_by_name(
            context,
            scoring_engine_name)
        scoring_engine = ScoringEngine._from_db_object(cls(context),
                                                       db_scoring_engine)
        return scoring_engine

    @classmethod
    def list(cls, context, filters=None, limit=None, marker=None,
             sort_key=None, sort_dir=None):
        """Return a list of :class:`ScoringEngine` objects.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: ScoringEngine(context)
        :param filters: dict mapping the filter key to a value.
        :param limit: maximum number of resources to return in a single result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :returns: a list of :class:`ScoringEngine` objects.
        """

        db_scoring_engines = cls.dbapi.get_scoring_engine_list(
            context,
            filters=filters,
            limit=limit,
            marker=marker,
            sort_key=sort_key,
            sort_dir=sort_dir)
        return ScoringEngine._from_db_object_list(db_scoring_engines,
                                                  cls, context)

    def create(self, context=None):
        """Create a :class:`ScoringEngine` record in the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: ScoringEngine(context)
        """

        values = self.obj_get_changes()
        db_scoring_engine = self.dbapi.create_scoring_engine(values)
        self._from_db_object(self, db_scoring_engine)

    def destroy(self, context=None):
        """Delete the :class:`ScoringEngine` from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: ScoringEngine(context)
        """

        self.dbapi.destroy_scoring_engine(self.id)
        self.obj_reset_changes()

    def save(self, context=None):
        """Save updates to this :class:`ScoringEngine`.

        Updates will be made column by column based on the result
        of self.what_changed().

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: ScoringEngine(context)
        """

        updates = self.obj_get_changes()
        self.dbapi.update_scoring_engine(self.id, updates)

        self.obj_reset_changes()

    def refresh(self, context=None):
        """Loads updates for this :class:`ScoringEngine`.

        Loads a scoring_engine with the same id from the database and
        checks for updated attributes. Updates are applied from
        the loaded scoring_engine column by column, if there are any updates.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: ScoringEngine(context)
        """

        current = self.__class__.get_by_id(self._context,
                                           scoring_engine_id=self.id)
        for field in self.fields:
            if (hasattr(self, base.get_attrname(field)) and
                    self[field] != current[field]):
                self[field] = current[field]

    def soft_delete(self, context=None):
        """Soft delete the :class:`ScoringEngine` from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: ScoringEngine(context)
        """

        self.dbapi.soft_delete_scoring_engine(self.id)
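A short sketch of how this new versioned object could be used from other Watcher code, for example by the Decision Engine synchronization mentioned in the commit message. It assumes an existing request `context`; the field values are placeholders.

# Sketch: create, look up and update a ScoringEngine through the objects layer.
from watcher.common import utils
from watcher import objects

se = objects.ScoringEngine(context)          # `context` assumed to exist
se.uuid = utils.generate_uuid()
se.name = 'demo-se'
se.description = 'demo scoring engine'
se.metainfo = 'a=b'
se.create()                                  # persists via the DB API

found = objects.ScoringEngine.get_by_name(context, 'demo-se')
found.description = 'updated'
found.save()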
@@ -37,7 +37,7 @@ class TestV1Root(base.FunctionalTest):
         not_resources = ('id', 'links', 'media_types')
         actual_resources = tuple(set(data.keys()) - set(not_resources))
         expected_resources = ('audit_templates', 'audits', 'actions',
-                              'action_plans')
+                              'action_plans', 'scoring_engines')
         self.assertEqual(sorted(expected_resources), sorted(actual_resources))

         self.assertIn({'type': 'application/vnd.openstack.watcher.v1+json',
watcher/tests/api/v1/test_scoring_engines.py  (new file, 160 lines)
@@ -0,0 +1,160 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_config import cfg
from oslo_serialization import jsonutils

from watcher.common import utils

from watcher.tests.api import base as api_base
from watcher.tests.objects import utils as obj_utils


class TestListScoringEngine(api_base.FunctionalTest):

    def _assert_scoring_engine_fields(self, scoring_engine):
        scoring_engine_fields = ['uuid', 'name', 'description', 'metainfo']
        for field in scoring_engine_fields:
            self.assertIn(field, scoring_engine)

    def test_one(self):
        scoring_engine = obj_utils.create_test_scoring_engine(self.context)
        response = self.get_json('/scoring_engines')
        self.assertEqual(
            scoring_engine.name, response['scoring_engines'][0]['name'])
        self._assert_scoring_engine_fields(response['scoring_engines'][0])

    def test_get_one_soft_deleted(self):
        scoring_engine = obj_utils.create_test_scoring_engine(self.context)
        scoring_engine.soft_delete()
        response = self.get_json(
            '/scoring_engines/%s' % scoring_engine['name'],
            headers={'X-Show-Deleted': 'True'})
        self.assertEqual(scoring_engine.name, response['name'])
        self._assert_scoring_engine_fields(response)

        response = self.get_json(
            '/scoring_engines/%s' % scoring_engine['name'],
            expect_errors=True)
        self.assertEqual(404, response.status_int)

    def test_detail(self):
        obj_utils.create_test_goal(self.context)
        scoring_engine = obj_utils.create_test_scoring_engine(self.context)
        response = self.get_json('/scoring_engines/detail')
        self.assertEqual(
            scoring_engine.name, response['scoring_engines'][0]['name'])
        self._assert_scoring_engine_fields(response['scoring_engines'][0])
        for scoring_engine in response['scoring_engines']:
            self.assertTrue(
                all(val is not None for key, val in scoring_engine.items()
                    if key in ['uuid', 'name', 'description', 'metainfo']))

    def test_detail_against_single(self):
        scoring_engine = obj_utils.create_test_scoring_engine(self.context)
        response = self.get_json(
            '/scoring_engines/%s/detail' % scoring_engine.id,
            expect_errors=True)
        self.assertEqual(404, response.status_int)

    def test_many(self):
        scoring_engine_list = []
        for idx in range(1, 6):
            scoring_engine = obj_utils.create_test_scoring_engine(
                self.context, id=idx, uuid=utils.generate_uuid(),
                name=str(idx), description='SE_{0}'.format(idx))
            scoring_engine_list.append(scoring_engine.name)
        response = self.get_json('/scoring_engines')
        self.assertEqual(5, len(response['scoring_engines']))
        for scoring_engine in response['scoring_engines']:
            self.assertTrue(
                all(val is not None for key, val in scoring_engine.items()
                    if key in ['name', 'description', 'metainfo']))

    def test_many_without_soft_deleted(self):
        scoring_engine_list = []
        for id_ in [1, 2, 3]:
            scoring_engine = obj_utils.create_test_scoring_engine(
                self.context, id=id_, uuid=utils.generate_uuid(),
                name=str(id_), description='SE_{0}'.format(id_))
            scoring_engine_list.append(scoring_engine.name)
        for id_ in [4, 5]:
            scoring_engine = obj_utils.create_test_scoring_engine(
                self.context, id=id_, uuid=utils.generate_uuid(),
                name=str(id_), description='SE_{0}'.format(id_))
            scoring_engine.soft_delete()
        response = self.get_json('/scoring_engines')
        self.assertEqual(3, len(response['scoring_engines']))
        names = [s['name'] for s in response['scoring_engines']]
        self.assertEqual(sorted(scoring_engine_list), sorted(names))

    def test_scoring_engines_collection_links(self):
        for idx in range(1, 6):
            obj_utils.create_test_scoring_engine(
                self.context, id=idx, uuid=utils.generate_uuid(),
                name=str(idx), description='SE_{0}'.format(idx))
        response = self.get_json('/scoring_engines/?limit=2')
        self.assertEqual(2, len(response['scoring_engines']))

    def test_scoring_engines_collection_links_default_limit(self):
        for idx in range(1, 6):
            obj_utils.create_test_scoring_engine(
                self.context, id=idx, uuid=utils.generate_uuid(),
                name=str(idx), description='SE_{0}'.format(idx))
        cfg.CONF.set_override('max_limit', 3, 'api', enforce_type=True)
        response = self.get_json('/scoring_engines')
        self.assertEqual(3, len(response['scoring_engines']))


class TestScoringEnginePolicyEnforcement(api_base.FunctionalTest):

    def _common_policy_check(self, rule, func, *arg, **kwarg):
        self.policy.set_rules({
            "admin_api": "(role:admin or role:administrator)",
            "default": "rule:admin_api",
            rule: "rule:default"})
        response = func(*arg, **kwarg)
        self.assertEqual(403, response.status_int)
        self.assertEqual('application/json', response.content_type)
        self.assertTrue(
            "Policy doesn't allow %s to be performed." % rule,
            jsonutils.loads(response.json['error_message'])['faultstring'])

    def test_policy_disallow_get_all(self):
        self._common_policy_check(
            "scoring_engine:get_all", self.get_json, '/scoring_engines',
            expect_errors=True)

    def test_policy_disallow_get_one(self):
        se = obj_utils.create_test_scoring_engine(self.context)
        self._common_policy_check(
            "scoring_engine:get", self.get_json,
            '/scoring_engines/%s' % se.uuid,
            expect_errors=True)

    def test_policy_disallow_detail(self):
        self._common_policy_check(
            "scoring_engine:detail", self.get_json,
            '/scoring_engines/detail',
            expect_errors=True)


class TestScoringEnginePolicyEnforcementWithAdminContext(
        TestListScoringEngine, api_base.AdminRoleTest):

    def setUp(self):
        super(TestScoringEnginePolicyEnforcementWithAdminContext,
              self).setUp()
        self.policy.set_rules({
            "admin_api": "(role:admin or role:administrator)",
            "default": "rule:admin_api",
            "scoring_engine:detail": "rule:default",
            "scoring_engine:get": "rule:default",
            "scoring_engine:get_all": "rule:default"})
watcher/tests/db/test_scoring_engine.py  (new file, 337 lines)
@@ -0,0 +1,337 @@
|
|||||||
|
# -*- encoding: utf-8 -*-
|
||||||
|
# Copyright (c) 2016 Intel
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
# implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
|
||||||
|
"""Tests for manipulating ScoringEngine via the DB API"""
|
||||||
|
|
||||||
|
import freezegun
|
||||||
|
import six
|
||||||
|
|
||||||
|
from watcher.common import exception
|
||||||
|
from watcher.common import utils as w_utils
|
||||||
|
from watcher.tests.db import base
|
||||||
|
from watcher.tests.db import utils
|
||||||
|
|
||||||
|
|
||||||
|
class TestDbScoringEngineFilters(base.DbTestCase):
|
||||||
|
|
||||||
|
FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414'
|
||||||
|
FAKE_OLD_DATE = '2015-01-01T09:52:05.219414'
|
||||||
|
FAKE_TODAY = '2016-02-24T09:52:05.219414'
|
||||||
|
|
||||||
|
def setUp(self):
|
||||||
|
super(TestDbScoringEngineFilters, self).setUp()
|
||||||
|
self.context.show_deleted = True
|
||||||
|
self._data_setup()
|
||||||
|
|
||||||
|
def _data_setup(self):
|
||||||
|
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||||
|
self.scoring_engine1 = utils.create_test_scoring_engine(
|
||||||
|
id=1, uuid='e8370ede-4f39-11e6-9ffa-08002722cb22',
|
||||||
|
name="se-1", description="Scoring Engine 1", metainfo="a1=b1")
|
||||||
|
with freezegun.freeze_time(self.FAKE_OLD_DATE):
|
||||||
|
self.scoring_engine2 = utils.create_test_scoring_engine(
|
||||||
|
id=2, uuid='e8370ede-4f39-11e6-9ffa-08002722cb23',
|
||||||
|
name="se-2", description="Scoring Engine 2", metainfo="a2=b2")
|
||||||
|
with freezegun.freeze_time(self.FAKE_OLDER_DATE):
|
||||||
|
self.scoring_engine3 = utils.create_test_scoring_engine(
|
||||||
|
id=3, uuid='e8370ede-4f39-11e6-9ffa-08002722cb24',
|
||||||
|
name="se-3", description="Scoring Engine 3", metainfo="a3=b3")
|
||||||
|
|
||||||
|
def _soft_delete_scoring_engines(self):
|
||||||
|
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||||
|
self.dbapi.soft_delete_scoring_engine(self.scoring_engine1.id)
|
||||||
|
with freezegun.freeze_time(self.FAKE_OLD_DATE):
|
||||||
|
self.dbapi.soft_delete_scoring_engine(self.scoring_engine2.id)
|
||||||
|
with freezegun.freeze_time(self.FAKE_OLDER_DATE):
|
||||||
|
self.dbapi.soft_delete_scoring_engine(self.scoring_engine3.id)
|
||||||
|
|
||||||
|
def _update_scoring_engines(self):
|
||||||
|
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||||
|
self.dbapi.update_scoring_engine(
|
||||||
|
self.scoring_engine1.id,
|
||||||
|
values={"description": "scoring_engine1"})
|
||||||
|
with freezegun.freeze_time(self.FAKE_OLD_DATE):
|
||||||
|
self.dbapi.update_scoring_engine(
|
||||||
|
self.scoring_engine2.id,
|
||||||
|
values={"description": "scoring_engine2"})
|
||||||
|
with freezegun.freeze_time(self.FAKE_OLDER_DATE):
|
||||||
|
self.dbapi.update_scoring_engine(
|
||||||
|
self.scoring_engine3.id,
|
||||||
|
values={"description": "scoring_engine3"})
|
||||||
|
|
||||||
|
def test_get_scoring_engine_list_filter_deleted_true(self):
|
||||||
|
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||||
|
self.dbapi.soft_delete_scoring_engine(self.scoring_engine1.id)
|
||||||
|
|
||||||
|
res = self.dbapi.get_scoring_engine_list(
|
||||||
|
self.context, filters={'deleted': True})
|
||||||
|
|
||||||
|
self.assertEqual([self.scoring_engine1['id']], [r.id for r in res])
|
||||||
|
|
||||||
|
def test_get_scoring_engine_list_filter_deleted_false(self):
|
||||||
|
with freezegun.freeze_time(self.FAKE_TODAY):
|
||||||
|
self.dbapi.soft_delete_scoring_engine(self.scoring_engine1.id)
|
||||||
|
|
||||||
|
res = self.dbapi.get_scoring_engine_list(
|
||||||
|
self.context, filters={'deleted': False})
|
||||||
|
|
||||||
|
self.assertEqual(
|
||||||
|
set([self.scoring_engine2['id'], self.scoring_engine3['id']]),
|
||||||
|
set([r.id for r in res]))
|
||||||
|
|
||||||
|
def test_get_scoring_engine_list_filter_deleted_at_eq(self):
|
||||||
|
self._soft_delete_scoring_engines()
|
||||||
|
|
||||||
|
res = self.dbapi.get_scoring_engine_list(
|
||||||
|
self.context, filters={'deleted_at__eq': self.FAKE_TODAY})
|
||||||
|
|
||||||
|
self.assertEqual([self.scoring_engine1['id']], [r.id for r in res])
|
||||||
|
|
||||||
|
def test_get_scoring_engine_list_filter_deleted_at_lt(self):
|
||||||
|
self._soft_delete_scoring_engines()
|
||||||
|
|
||||||
|
res = self.dbapi.get_scoring_engine_list(
|
||||||
|
self.context, filters={'deleted_at__lt': self.FAKE_TODAY})
|
||||||
|
|
||||||
|
self.assertEqual(
|
||||||
|
set([self.scoring_engine2['id'], self.scoring_engine3['id']]),
|
||||||
|
set([r.id for r in res]))
|
||||||
|
|
||||||
|
def test_get_scoring_engine_list_filter_deleted_at_lte(self):
|
||||||
|
self._soft_delete_scoring_engines()
|
||||||
|
|
||||||
|
res = self.dbapi.get_scoring_engine_list(
|
||||||
|
self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE})
|
||||||
|
|
||||||
|
self.assertEqual(
|
||||||
|
set([self.scoring_engine2['id'], self.scoring_engine3['id']]),
|
||||||
|
set([r.id for r in res]))
|
||||||
|
|
||||||
|
def test_get_scoring_engine_list_filter_deleted_at_gt(self):
|
||||||
|
self._soft_delete_scoring_engines()
|
||||||
|
|
||||||
|
res = self.dbapi.get_scoring_engine_list(
|
||||||
|
self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE})
|
||||||
|
|
||||||
|
self.assertEqual([self.scoring_engine1['id']], [r.id for r in res])
|
||||||
|
|
||||||
|
def test_get_scoring_engine_list_filter_deleted_at_gte(self):
|
||||||
|
self._soft_delete_scoring_engines()
|
||||||
|
|
||||||
|
res = self.dbapi.get_scoring_engine_list(
|
||||||
|
self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE})
|
||||||
|
|
||||||
|
self.assertEqual(
|
||||||
|
set([self.scoring_engine1['id'], self.scoring_engine2['id']]),
|
||||||
|
set([r.id for r in res]))
|
||||||
|
|
||||||
|
# created_at #
|
||||||
|
|
||||||
|
def test_get_scoring_engine_list_filter_created_at_eq(self):
|
||||||
|
res = self.dbapi.get_scoring_engine_list(
|
||||||
|
self.context, filters={'created_at__eq': self.FAKE_TODAY})
|
||||||
|
|
||||||
|
self.assertEqual([self.scoring_engine1['id']], [r.id for r in res])
|
||||||
|
|
||||||
|
def test_get_scoring_engine_list_filter_created_at_lt(self):
|
||||||
|
res = self.dbapi.get_scoring_engine_list(
|
||||||
|
self.context, filters={'created_at__lt': self.FAKE_TODAY})
|
||||||
|
|
||||||
|
self.assertEqual(
|
||||||
|
set([self.scoring_engine2['id'], self.scoring_engine3['id']]),
|
||||||
|
set([r.id for r in res]))
|
||||||
|
|
||||||
|
def test_get_scoring_engine_list_filter_created_at_lte(self):
|
||||||
|
res = self.dbapi.get_scoring_engine_list(
|
||||||
|
self.context, filters={'created_at__lte': self.FAKE_OLD_DATE})
|
||||||
|
|
||||||
|
self.assertEqual(
|
||||||
|
set([self.scoring_engine2['id'], self.scoring_engine3['id']]),
|
||||||
|
set([r.id for r in res]))
|
||||||
|
|
||||||
|
def test_get_scoring_engine_list_filter_created_at_gt(self):
|
||||||
|
res = self.dbapi.get_scoring_engine_list(
|
||||||
|
self.context, filters={'created_at__gt': self.FAKE_OLD_DATE})
|
||||||
|
|
||||||
|
self.assertEqual([self.scoring_engine1['id']], [r.id for r in res])
|
||||||
|
|
||||||
|
def test_get_scoring_engine_list_filter_created_at_gte(self):
|
||||||
|
res = self.dbapi.get_scoring_engine_list(
|
||||||
|
self.context, filters={'created_at__gte': self.FAKE_OLD_DATE})
|
||||||
|
|
||||||
|
self.assertEqual(
|
||||||
|
set([self.scoring_engine1['id'], self.scoring_engine2['id']]),
|
||||||
|
set([r.id for r in res]))
|
||||||
|
|
||||||
|
# updated_at #
|
||||||
|
|
||||||
|
def test_get_scoring_engine_list_filter_updated_at_eq(self):
|
||||||
|
self._update_scoring_engines()
|
||||||
|
|
||||||
|
res = self.dbapi.get_scoring_engine_list(
|
||||||
|
self.context, filters={'updated_at__eq': self.FAKE_TODAY})
|
||||||
|
|
||||||
|
self.assertEqual([self.scoring_engine1['id']], [r.id for r in res])
|
||||||
|
|
||||||
|
def test_get_scoring_engine_list_filter_updated_at_lt(self):
|
||||||
|
self._update_scoring_engines()
|
||||||
|
|
||||||
|
res = self.dbapi.get_scoring_engine_list(
|
||||||
|
self.context, filters={'updated_at__lt': self.FAKE_TODAY})
|
||||||
|
|
||||||
|
self.assertEqual(
|
||||||
|
set([self.scoring_engine2['id'], self.scoring_engine3['id']]),
|
||||||
|
set([r.id for r in res]))
|
||||||
|
|
||||||
|
def test_get_scoring_engine_list_filter_updated_at_lte(self):
|
||||||
|
self._update_scoring_engines()
|
||||||
|
|
||||||
|
res = self.dbapi.get_scoring_engine_list(
|
||||||
|
self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE})
|
||||||
|
|
||||||
|
self.assertEqual(
|
||||||
|
set([self.scoring_engine2['id'], self.scoring_engine3['id']]),
|
||||||
|
set([r.id for r in res]))
|
||||||
|
|
||||||
|
def test_get_scoring_engine_list_filter_updated_at_gt(self):
|
||||||
|
self._update_scoring_engines()
|
||||||
|
|
||||||
|
res = self.dbapi.get_scoring_engine_list(
|
||||||
|
self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE})
|
||||||
|
|
||||||
|
self.assertEqual([self.scoring_engine1['id']], [r.id for r in res])
|
||||||
|
|
||||||
|
def test_get_scoring_engine_list_filter_updated_at_gte(self):
|
||||||
|
self._update_scoring_engines()
|
||||||
|
|
||||||
|
res = self.dbapi.get_scoring_engine_list(
|
||||||
|
self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE})
|
||||||
|
|
||||||
|
self.assertEqual(
|
||||||
|
set([self.scoring_engine1['id'], self.scoring_engine2['id']]),
|
||||||
|
set([r.id for r in res]))
|
||||||
|
|
||||||
|
|
||||||
|
class DbScoringEngineTestCase(base.DbTestCase):

    def _create_test_scoring_engine(self, **kwargs):
        scoring_engine = utils.get_test_scoring_engine(**kwargs)
        self.dbapi.create_scoring_engine(scoring_engine)
        return scoring_engine

    def test_get_scoring_engine_list(self):
        names = []
        for i in range(1, 6):
            scoring_engine = utils.create_test_scoring_engine(
                id=i,
                uuid=w_utils.generate_uuid(),
                name="SE_ID_%s" % i,
                description='My ScoringEngine {0}'.format(i),
                metainfo='a{0}=b{0}'.format(i))
            names.append(six.text_type(scoring_engine['name']))
        res = self.dbapi.get_scoring_engine_list(self.context)
        res_names = [r.name for r in res]
        self.assertEqual(sorted(names), sorted(res_names))

    def test_get_scoring_engine_list_with_filters(self):
        scoring_engine1 = self._create_test_scoring_engine(
            id=1,
            uuid=w_utils.generate_uuid(),
            name="SE_ID_1",
            description='ScoringEngine 1',
            metainfo="a1=b1",
        )
        scoring_engine2 = self._create_test_scoring_engine(
            id=2,
            uuid=w_utils.generate_uuid(),
            name="SE_ID_2",
            description='ScoringEngine 2',
            metainfo="a2=b2",
        )

        res = self.dbapi.get_scoring_engine_list(
            self.context, filters={'description': 'ScoringEngine 1'})
        self.assertEqual([scoring_engine1['name']], [r.name for r in res])

        res = self.dbapi.get_scoring_engine_list(
            self.context, filters={'description': 'ScoringEngine 3'})
        self.assertEqual([], [r.name for r in res])

        res = self.dbapi.get_scoring_engine_list(
            self.context,
            filters={'description': 'ScoringEngine 2'})
        self.assertEqual([scoring_engine2['name']], [r.name for r in res])

    def test_get_scoring_engine_by_id(self):
        created_scoring_engine = self._create_test_scoring_engine()
        scoring_engine = self.dbapi.get_scoring_engine_by_id(
            self.context, created_scoring_engine['id'])
        self.assertEqual(scoring_engine.id, created_scoring_engine['id'])

    def test_get_scoring_engine_by_uuid(self):
        created_scoring_engine = self._create_test_scoring_engine()
        scoring_engine = self.dbapi.get_scoring_engine_by_uuid(
            self.context, created_scoring_engine['uuid'])
        self.assertEqual(scoring_engine.uuid, created_scoring_engine['uuid'])

    def test_get_scoring_engine_by_name(self):
        created_scoring_engine = self._create_test_scoring_engine()
        scoring_engine = self.dbapi.get_scoring_engine_by_name(
            self.context, created_scoring_engine['name'])
        self.assertEqual(scoring_engine.name, created_scoring_engine['name'])

    def test_get_scoring_engine_that_does_not_exist(self):
        self.assertRaises(exception.ScoringEngineNotFound,
                          self.dbapi.get_scoring_engine_by_id,
                          self.context, 404)

    def test_update_scoring_engine(self):
        scoring_engine = self._create_test_scoring_engine()
        res = self.dbapi.update_scoring_engine(
            scoring_engine['id'], {'description': 'updated-model'})
        self.assertEqual('updated-model', res.description)

    def test_update_scoring_engine_id(self):
        scoring_engine = self._create_test_scoring_engine()
        self.assertRaises(exception.Invalid,
                          self.dbapi.update_scoring_engine,
                          scoring_engine['id'],
                          {'id': 5})

    def test_update_scoring_engine_that_does_not_exist(self):
        self.assertRaises(exception.ScoringEngineNotFound,
                          self.dbapi.update_scoring_engine,
                          404,
                          {'description': ''})

    def test_destroy_scoring_engine(self):
        scoring_engine = self._create_test_scoring_engine()
        self.dbapi.destroy_scoring_engine(scoring_engine['id'])
        self.assertRaises(exception.ScoringEngineNotFound,
                          self.dbapi.get_scoring_engine_by_id,
                          self.context, scoring_engine['id'])

    def test_destroy_scoring_engine_that_does_not_exist(self):
        self.assertRaises(exception.ScoringEngineNotFound,
                          self.dbapi.destroy_scoring_engine, 404)

    def test_create_scoring_engine_already_exists(self):
        scoring_engine_id = "SE_ID"
        self._create_test_scoring_engine(name=scoring_engine_id)
        self.assertRaises(exception.ScoringEngineAlreadyExists,
                          self._create_test_scoring_engine,
                          name=scoring_engine_id)
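Collected from the calls made in DbScoringEngineTestCase, the DB-API surface these tests require looks roughly like the abstract interface below. Signatures are inferred from the tests; the pagination keywords are assumed to match Watcher's other resources, and the authoritative definitions live in watcher/db/api.py, not here.

import abc

import six


@six.add_metaclass(abc.ABCMeta)
class ScoringEngineConnection(object):
    """DB-API methods exercised by the scoring engine tests (sketch)."""

    @abc.abstractmethod
    def get_scoring_engine_list(self, context, filters=None, limit=None,
                                marker=None, sort_key=None, sort_dir=None):
        """Return a (possibly filtered) list of scoring engines."""

    @abc.abstractmethod
    def create_scoring_engine(self, values):
        """Create a scoring engine from a dict of column values."""

    @abc.abstractmethod
    def get_scoring_engine_by_id(self, context, scoring_engine_id):
        """Raise ScoringEngineNotFound if the id does not exist."""

    @abc.abstractmethod
    def get_scoring_engine_by_uuid(self, context, scoring_engine_uuid):
        """Raise ScoringEngineNotFound if the uuid does not exist."""

    @abc.abstractmethod
    def get_scoring_engine_by_name(self, context, scoring_engine_name):
        """Raise ScoringEngineNotFound if the name does not exist."""

    @abc.abstractmethod
    def update_scoring_engine(self, scoring_engine_id, values):
        """Update fields; attempting to change 'id' raises Invalid."""

    @abc.abstractmethod
    def destroy_scoring_engine(self, scoring_engine_id):
        """Hard-delete; raise ScoringEngineNotFound if missing."""

    @abc.abstractmethod
    def soft_delete_scoring_engine(self, scoring_engine_id):
        """Soft-delete; the row is kept with deleted_at set."""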
@ -170,6 +170,31 @@ def create_test_goal(**kwargs):
    return dbapi.create_goal(goal)


def get_test_scoring_engine(**kwargs):
    return {
        'id': kwargs.get('id', 1),
        'uuid': kwargs.get('uuid', 'e8370ede-4f39-11e6-9ffa-08002722cb21'),
        'name': kwargs.get('name', 'test-se-01'),
        'description': kwargs.get('description', 'test scoring engine 01'),
        'metainfo': kwargs.get('metainfo', 'test_attr=test_val'),
        'created_at': kwargs.get('created_at'),
        'updated_at': kwargs.get('updated_at'),
        'deleted_at': kwargs.get('deleted_at'),
    }


def create_test_scoring_engine(**kwargs):
    """Create test scoring engine in DB and return ScoringEngine DB object.

    Function to be used to create test ScoringEngine objects in the database.
    :param kwargs: kwargs with overriding values for the scoring engine's
        attributes.
    :returns: Test ScoringEngine DB object.
    """
    scoring_engine = get_test_scoring_engine(**kwargs)
    dbapi = db_api.get_instance()
    return dbapi.create_scoring_engine(scoring_engine)


def get_test_strategy(**kwargs):
    return {
        'id': kwargs.get('id', 1),
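The defaults returned by get_test_scoring_engine() imply a scoring_engines table with id, uuid, name, description, metainfo and the usual timestamp columns. A rough SQLAlchemy sketch of such a table follows; column lengths and the plain declarative base are assumptions for illustration, and the real model for this change lives in Watcher's SQLAlchemy models module.

from sqlalchemy import Column, DateTime, Integer, String, Text
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class ScoringEngine(Base):
    __tablename__ = 'scoring_engines'

    id = Column(Integer, primary_key=True, autoincrement=True)
    uuid = Column(String(36), unique=True)
    name = Column(String(63), unique=True)      # length is an assumption
    description = Column(String(255), nullable=True)
    metainfo = Column(Text, nullable=True)      # engine-specific metadata blob
    created_at = Column(DateTime)
    updated_at = Column(DateTime)
    deleted_at = Column(DateTime)               # soft-delete marker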
@ -47,6 +47,10 @@ policy_data = """
|
|||||||
"goal:get": "",
|
"goal:get": "",
|
||||||
"goal:get_all": "",
|
"goal:get_all": "",
|
||||||
|
|
||||||
|
"scoring_engine:detail": "",
|
||||||
|
"scoring_engine:get": "",
|
||||||
|
"scoring_engine:get_all": "",
|
||||||
|
|
||||||
"strategy:detail": "",
|
"strategy:detail": "",
|
||||||
"strategy:get": "",
|
"strategy:get": "",
|
||||||
"strategy:get_all": ""
|
"strategy:get_all": ""
|
||||||
|
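In the test policy data every new scoring_engine rule is the empty string, which oslo.policy treats as "always allow". The snippet below is a generic illustration of checking one of these rules with oslo.policy; the enforcer setup, helper name, and context.to_dict() call are assumptions here, not Watcher's actual policy plumbing.

from oslo_config import cfg
from oslo_policy import policy

enforcer = policy.Enforcer(cfg.CONF)
enforcer.register_default(
    policy.RuleDefault('scoring_engine:get_all', '',
                       description='List available scoring engines'))


def can_list_scoring_engines(context):
    # target: attributes of the acted-on object; creds: the request context
    return enforcer.enforce('scoring_engine:get_all', {}, context.to_dict())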
150 watcher/tests/objects/test_scoring_engine.py Normal file
@ -0,0 +1,150 @@
# Copyright 2016 Intel
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
from testtools import matchers

from watcher import objects
from watcher.tests.db import base
from watcher.tests.db import utils


class TestScoringEngineObject(base.DbTestCase):

    def setUp(self):
        super(TestScoringEngineObject, self).setUp()
        self.fake_scoring_engine = utils.get_test_scoring_engine()

    def test_get_by_id(self):
        scoring_engine_id = self.fake_scoring_engine['id']
        with mock.patch.object(self.dbapi, 'get_scoring_engine_by_id',
                               autospec=True) as mock_get_scoring_engine:
            mock_get_scoring_engine.return_value = self.fake_scoring_engine
            scoring_engine = objects.ScoringEngine.get_by_id(
                self.context, scoring_engine_id)
            mock_get_scoring_engine.assert_called_once_with(
                self.context, scoring_engine_id)
            self.assertEqual(self.context, scoring_engine._context)

    def test_get_by_uuid(self):
        se_uuid = self.fake_scoring_engine['uuid']
        with mock.patch.object(self.dbapi, 'get_scoring_engine_by_uuid',
                               autospec=True) as mock_get_scoring_engine:
            mock_get_scoring_engine.return_value = self.fake_scoring_engine
            scoring_engine = objects.ScoringEngine.get_by_uuid(
                self.context, se_uuid)
            mock_get_scoring_engine.assert_called_once_with(
                self.context, se_uuid)
            self.assertEqual(self.context, scoring_engine._context)

    def test_get_by_name(self):
        scoring_engine_uuid = self.fake_scoring_engine['uuid']
        with mock.patch.object(self.dbapi, 'get_scoring_engine_by_uuid',
                               autospec=True) as mock_get_scoring_engine:
            mock_get_scoring_engine.return_value = self.fake_scoring_engine
            scoring_engine = objects.ScoringEngine.get(
                self.context, scoring_engine_uuid)
            mock_get_scoring_engine.assert_called_once_with(
                self.context, scoring_engine_uuid)
            self.assertEqual(self.context, scoring_engine._context)

    def test_list(self):
        with mock.patch.object(self.dbapi, 'get_scoring_engine_list',
                               autospec=True) as mock_get_list:
            mock_get_list.return_value = [self.fake_scoring_engine]
            scoring_engines = objects.ScoringEngine.list(self.context)
            self.assertEqual(1, mock_get_list.call_count)
            self.assertThat(scoring_engines, matchers.HasLength(1))
            self.assertIsInstance(scoring_engines[0], objects.ScoringEngine)
            self.assertEqual(self.context, scoring_engines[0]._context)

    def test_create(self):
        with mock.patch.object(self.dbapi, 'create_scoring_engine',
                               autospec=True) as mock_create_scoring_engine:
            mock_create_scoring_engine.return_value = self.fake_scoring_engine
            scoring_engine = objects.ScoringEngine(
                self.context, **self.fake_scoring_engine)

            scoring_engine.create()
            mock_create_scoring_engine.assert_called_once_with(
                self.fake_scoring_engine)
            self.assertEqual(self.context, scoring_engine._context)

    def test_destroy(self):
        _id = self.fake_scoring_engine['id']
        with mock.patch.object(self.dbapi, 'get_scoring_engine_by_id',
                               autospec=True) as mock_get_scoring_engine:
            mock_get_scoring_engine.return_value = self.fake_scoring_engine
            with mock.patch.object(
                    self.dbapi, 'destroy_scoring_engine',
                    autospec=True) as mock_destroy_scoring_engine:
                scoring_engine = objects.ScoringEngine.get_by_id(
                    self.context, _id)
                scoring_engine.destroy()
                mock_get_scoring_engine.assert_called_once_with(
                    self.context, _id)
                mock_destroy_scoring_engine.assert_called_once_with(_id)
                self.assertEqual(self.context, scoring_engine._context)

    def test_save(self):
        _id = self.fake_scoring_engine['id']
        with mock.patch.object(self.dbapi, 'get_scoring_engine_by_id',
                               autospec=True) as mock_get_scoring_engine:
            mock_get_scoring_engine.return_value = self.fake_scoring_engine
            with mock.patch.object(
                    self.dbapi, 'update_scoring_engine',
                    autospec=True) as mock_update_scoring_engine:
                scoring_engine = objects.ScoringEngine.get_by_id(
                    self.context, _id)
                scoring_engine.description = 'UPDATED DESCRIPTION'
                scoring_engine.save()

                mock_get_scoring_engine.assert_called_once_with(
                    self.context, _id)
                mock_update_scoring_engine.assert_called_once_with(
                    _id, {'description': 'UPDATED DESCRIPTION'})
                self.assertEqual(self.context, scoring_engine._context)

    def test_refresh(self):
        _id = self.fake_scoring_engine['id']
        returns = [
            dict(self.fake_scoring_engine, description="first description"),
            dict(self.fake_scoring_engine, description="second description")]
        expected = [mock.call(self.context, _id),
                    mock.call(self.context, _id)]
        with mock.patch.object(self.dbapi, 'get_scoring_engine_by_id',
                               side_effect=returns,
                               autospec=True) as mock_get_scoring_engine:
            scoring_engine = objects.ScoringEngine.get_by_id(
                self.context, _id)
            self.assertEqual("first description", scoring_engine.description)
            scoring_engine.refresh()
            self.assertEqual("second description", scoring_engine.description)
            self.assertEqual(expected, mock_get_scoring_engine.call_args_list)
            self.assertEqual(self.context, scoring_engine._context)

    def test_soft_delete(self):
        _id = self.fake_scoring_engine['id']
        with mock.patch.object(self.dbapi, 'get_scoring_engine_by_id',
                               autospec=True) as mock_get_scoring_engine:
            mock_get_scoring_engine.return_value = self.fake_scoring_engine
            with mock.patch.object(self.dbapi, 'soft_delete_scoring_engine',
                                   autospec=True) as mock_soft_delete:
                scoring_engine = objects.ScoringEngine.get_by_id(
                    self.context, _id)
                scoring_engine.soft_delete()
                mock_get_scoring_engine.assert_called_once_with(
                    self.context, _id)
                mock_soft_delete.assert_called_once_with(_id)
                self.assertEqual(self.context, scoring_engine._context)
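The commit message notes that the scoring engine list is synchronized by the Decision Engine. Using only the ScoringEngine object methods exercised above (list, create, save, soft_delete), a sync pass could look roughly like the sketch below; sync_scoring_engines() and the shape of the discovered mapping are assumptions, not Watcher APIs.

from watcher import objects


def sync_scoring_engines(context, discovered):
    """Upsert engines; 'discovered' maps name -> (description, metainfo)."""
    existing = {se.name: se for se in objects.ScoringEngine.list(context)}
    for name, (description, metainfo) in discovered.items():
        se = existing.get(name)
        if se is None:
            # a real syncer would also assign a generated uuid here
            se = objects.ScoringEngine(context, name=name,
                                       description=description,
                                       metainfo=metainfo)
            se.create()
        else:
            se.description = description
            se.metainfo = metainfo
            se.save()
    # engines no longer reported by any plugin are soft-deleted, not dropped
    for name, se in existing.items():
        if name not in discovered:
            se.soft_delete()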
@ -164,6 +164,30 @@ def create_test_goal(context, **kw):
    return goal


def get_test_scoring_engine(context, **kw):
    """Return a ScoringEngine object with appropriate attributes.

    NOTE: The object leaves the attributes marked as changed, such
    that a create() could be used to commit it to the DB.
    """
    db_scoring_engine = db_utils.get_test_scoring_engine(**kw)
    scoring_engine = objects.ScoringEngine(context)
    for key in db_scoring_engine:
        setattr(scoring_engine, key, db_scoring_engine[key])
    return scoring_engine


def create_test_scoring_engine(context, **kw):
    """Create and return a test scoring engine object.

    Create a scoring engine in the DB and return a ScoringEngine object with
    appropriate attributes.
    """
    scoring_engine = get_test_scoring_engine(context, **kw)
    scoring_engine.create()
    return scoring_engine


def get_test_strategy(context, **kw):
    """Return a Strategy object with appropriate attributes.
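With the object-layer helpers above in place, an API test can seed a scoring engine and read it back over the new /scoring_engines endpoint. The fragment below is a hypothetical test method: the obj_utils import alias, self.get_json(), and the 'scoring_engines' response key follow the pattern of Watcher's other v1 API tests and are assumptions here.

    def test_list_scoring_engines(self):
        # persist one engine via the object helper, then list it via the API
        obj_utils.create_test_scoring_engine(self.context,
                                             name='dummy_scorer')
        response = self.get_json('/scoring_engines')
        self.assertEqual('dummy_scorer',
                         response['scoring_engines'][0]['name'])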