Remove log translations

Log messages are no longer being translated. This removes all use of
the _LE, _LI, _LW, and _LC translation markers to simplify logging and
to avoid confusion with new contributions.

See:
http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html
http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html

Change-Id: I3552767976807a9851af69b1fa4f86ac25943025
This commit is contained in:
yanxubin 2017-03-22 16:01:23 +08:00
parent eb038e4af0
commit f605888e32
27 changed files with 154 additions and 170 deletions

View File

@ -45,7 +45,7 @@ commands =
[flake8] [flake8]
show-source=True show-source=True
ignore= H105,E123,E226 ignore= H105,E123,E226,N320
builtins= _ builtins= _
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/*,demo/,releasenotes exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/*,demo/,releasenotes

View File

@ -30,7 +30,6 @@ import wsme
from wsme import types as wtypes from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan import wsmeext.pecan as wsme_pecan
from watcher._i18n import _LW
from watcher.api.controllers import base from watcher.api.controllers import base
from watcher.api.controllers import link from watcher.api.controllers import link
from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import collection
@ -72,9 +71,9 @@ class Service(base.APIBase):
elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow()) elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow())
is_up = abs(elapsed) <= CONF.service_down_time is_up = abs(elapsed) <= CONF.service_down_time
if not is_up: if not is_up:
LOG.warning(_LW('Seems service %(name)s on host %(host)s is down. ' LOG.warning('Seems service %(name)s on host %(host)s is down. '
'Last heartbeat was %(lhb)s.' 'Last heartbeat was %(lhb)s.'
'Elapsed time is %(el)s'), 'Elapsed time is %(el)s',
{'name': service.name, {'name': service.name,
'host': service.host, 'host': service.host,
'lhb': str(last_heartbeat), 'el': str(elapsed)}) 'lhb': str(last_heartbeat), 'el': str(elapsed)})

View File

@ -27,7 +27,7 @@ from oslo_serialization import jsonutils
import six import six
import webob import webob
from watcher._i18n import _, _LE from watcher._i18n import _
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@ -79,7 +79,7 @@ class ParsableErrorMiddleware(object):
et.ElementTree.Element( et.ElementTree.Element(
'error_message', text='\n'.join(app_iter)))] 'error_message', text='\n'.join(app_iter)))]
except et.ElementTree.ParseError as err: except et.ElementTree.ParseError as err:
LOG.error(_LE('Error parsing HTTP response: %s'), err) LOG.error('Error parsing HTTP response: %s', err)
body = ['<error_message>%s' body = ['<error_message>%s'
'</error_message>' % state['status_code']] '</error_message>' % state['status_code']]
state['headers'].append(('Content-Type', 'application/xml')) state['headers'].append(('Content-Type', 'application/xml'))

View File

@ -21,7 +21,7 @@ from oslo_log import log
import six import six
import voluptuous import voluptuous
from watcher._i18n import _, _LC from watcher._i18n import _
from watcher.applier.actions import base from watcher.applier.actions import base
from watcher.common import exception from watcher.common import exception
from watcher.common import nova_helper from watcher.common import nova_helper
@ -120,9 +120,9 @@ class Migrate(base.BaseAction):
"migrating instance %s.Exception: %s" % "migrating instance %s.Exception: %s" %
(self.instance_uuid, e)) (self.instance_uuid, e))
except Exception: except Exception:
LOG.critical(_LC("Unexpected error occurred. Migration failed for " LOG.critical("Unexpected error occurred. Migration failed for "
"instance %s. Leaving instance on previous " "instance %s. Leaving instance on previous "
"host."), self.instance_uuid) "host.", self.instance_uuid)
return result return result
@ -134,9 +134,9 @@ class Migrate(base.BaseAction):
dest_hostname=destination) dest_hostname=destination)
except Exception as exc: except Exception as exc:
LOG.exception(exc) LOG.exception(exc)
LOG.critical(_LC("Unexpected error occurred. Migration failed for " LOG.critical("Unexpected error occurred. Migration failed for "
"instance %s. Leaving instance on previous " "instance %s. Leaving instance on previous "
"host."), self.instance_uuid) "host.", self.instance_uuid)
return result return result

View File

@ -21,7 +21,7 @@ from oslo_log import log
import six import six
import voluptuous import voluptuous
from watcher._i18n import _, _LC from watcher._i18n import _
from watcher.applier.actions import base from watcher.applier.actions import base
from watcher.common import nova_helper from watcher.common import nova_helper
from watcher.common import utils from watcher.common import utils
@ -86,8 +86,8 @@ class Resize(base.BaseAction):
except Exception as exc: except Exception as exc:
LOG.exception(exc) LOG.exception(exc)
LOG.critical( LOG.critical(
_LC("Unexpected error occurred. Resizing failed for " "Unexpected error occurred. Resizing failed for "
"instance %s."), self.instance_uuid) "instance %s.", self.instance_uuid)
return result return result
def execute(self): def execute(self):

View File

@ -22,7 +22,6 @@ from oslo_log import log
import six import six
from taskflow import task as flow_task from taskflow import task as flow_task
from watcher._i18n import _LE
from watcher.applier.actions import factory from watcher.applier.actions import factory
from watcher.common import clients from watcher.common import clients
from watcher.common.loader import loadable from watcher.common.loader import loadable
@ -151,8 +150,8 @@ class BaseTaskFlowActionContainer(flow_task.Task):
fields.NotificationPhase.END) fields.NotificationPhase.END)
except Exception as e: except Exception as e:
LOG.exception(e) LOG.exception(e)
LOG.error(_LE('The workflow engine has failed ' LOG.error('The workflow engine has failed '
'to execute the action: %s'), self.name) 'to execute the action: %s', self.name)
self.engine.notify(self._db_action, objects.action.State.FAILED) self.engine.notify(self._db_action, objects.action.State.FAILED)
notifications.action.send_execution_notification( notifications.action.send_execution_notification(
self.engine.context, self._db_action, self.engine.context, self._db_action,

View File

@ -22,7 +22,6 @@ from taskflow import engines
from taskflow.patterns import graph_flow as gf from taskflow.patterns import graph_flow as gf
from taskflow import task as flow_task from taskflow import task as flow_task
from watcher._i18n import _LW, _LC
from watcher.applier.workflow_engine import base from watcher.applier.workflow_engine import base
from watcher.common import exception from watcher.common import exception
from watcher import objects from watcher import objects
@ -117,13 +116,13 @@ class TaskFlowActionContainer(base.BaseTaskFlowActionContainer):
self.action.post_condition() self.action.post_condition()
def revert(self, *args, **kwargs): def revert(self, *args, **kwargs):
LOG.warning(_LW("Revert action: %s"), self.name) LOG.warning("Revert action: %s", self.name)
try: try:
# TODO(jed): do we need to update the states in case of failure? # TODO(jed): do we need to update the states in case of failure?
self.action.revert() self.action.revert()
except Exception as e: except Exception as e:
LOG.exception(e) LOG.exception(e)
LOG.critical(_LC("Oops! We need a disaster recover plan.")) LOG.critical("Oops! We need a disaster recover plan.")
class TaskFlowNop(flow_task.Task): class TaskFlowNop(flow_task.Task):

View File

@ -22,7 +22,6 @@ import sys
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from watcher._i18n import _LI
from watcher.common import service from watcher.common import service
from watcher import conf from watcher import conf
@ -39,11 +38,11 @@ def main():
server = service.WSGIService('watcher-api', CONF.api.enable_ssl_api) server = service.WSGIService('watcher-api', CONF.api.enable_ssl_api)
if host == '127.0.0.1': if host == '127.0.0.1':
LOG.info(_LI('serving on 127.0.0.1:%(port)s, ' LOG.info('serving on 127.0.0.1:%(port)s, '
'view at %(protocol)s://127.0.0.1:%(port)s') % 'view at %(protocol)s://127.0.0.1:%(port)s' %
dict(protocol=protocol, port=port)) dict(protocol=protocol, port=port))
else: else:
LOG.info(_LI('serving on %(protocol)s://%(host)s:%(port)s') % LOG.info('serving on %(protocol)s://%(host)s:%(port)s' %
dict(protocol=protocol, host=host, port=port)) dict(protocol=protocol, host=host, port=port))
launcher = service.launch(CONF, server, workers=server.workers) launcher = service.launch(CONF, server, workers=server.workers)

View File

@ -22,7 +22,6 @@ import sys
from oslo_log import log as logging from oslo_log import log as logging
from watcher._i18n import _LI
from watcher.applier import manager from watcher.applier import manager
from watcher.common import service as watcher_service from watcher.common import service as watcher_service
from watcher import conf from watcher import conf
@ -34,7 +33,7 @@ CONF = conf.CONF
def main(): def main():
watcher_service.prepare_service(sys.argv, CONF) watcher_service.prepare_service(sys.argv, CONF)
LOG.info(_LI('Starting Watcher Applier service in PID %s'), os.getpid()) LOG.info('Starting Watcher Applier service in PID %s', os.getpid())
applier_service = watcher_service.Service(manager.ApplierManager) applier_service = watcher_service.Service(manager.ApplierManager)

View File

@ -22,7 +22,6 @@ import sys
from oslo_log import log as logging from oslo_log import log as logging
from watcher._i18n import _LI
from watcher.common import service as watcher_service from watcher.common import service as watcher_service
from watcher import conf from watcher import conf
from watcher.decision_engine import gmr from watcher.decision_engine import gmr
@ -38,7 +37,7 @@ def main():
watcher_service.prepare_service(sys.argv, CONF) watcher_service.prepare_service(sys.argv, CONF)
gmr.register_gmr_plugins() gmr.register_gmr_plugins()
LOG.info(_LI('Starting Watcher Decision Engine service in PID %s'), LOG.info('Starting Watcher Decision Engine service in PID %s',
os.getpid()) os.getpid())
syncer = sync.Syncer() syncer = sync.Syncer()

View File

@ -22,7 +22,6 @@ import sys
from oslo_log import log as logging from oslo_log import log as logging
from watcher._i18n import _LI
from watcher.common import service as service from watcher.common import service as service
from watcher import conf from watcher import conf
from watcher.decision_engine import sync from watcher.decision_engine import sync
@ -32,10 +31,10 @@ CONF = conf.CONF
def main(): def main():
LOG.info(_LI('Watcher sync started.')) LOG.info('Watcher sync started.')
service.prepare_service(sys.argv, CONF) service.prepare_service(sys.argv, CONF)
syncer = sync.Syncer() syncer = sync.Syncer()
syncer.sync() syncer.sync()
LOG.info(_LI('Watcher sync finished.')) LOG.info('Watcher sync finished.')

View File

@ -15,7 +15,6 @@ from oslo_log import log as logging
from oslo_utils import timeutils from oslo_utils import timeutils
import six import six
from watcher._i18n import _LW
from watcher.common import utils from watcher.common import utils
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -65,7 +64,7 @@ class RequestContext(context.RequestContext):
# safely ignore this as we don't use it. # safely ignore this as we don't use it.
kwargs.pop('user_identity', None) kwargs.pop('user_identity', None)
if kwargs: if kwargs:
LOG.warning(_LW('Arguments dropped when creating context: %s'), LOG.warning('Arguments dropped when creating context: %s',
str(kwargs)) str(kwargs))
# FIXME(dims): user_id and project_id duplicate information that is # FIXME(dims): user_id and project_id duplicate information that is

View File

@ -29,7 +29,7 @@ from keystoneclient import exceptions as keystone_exceptions
from oslo_log import log as logging from oslo_log import log as logging
import six import six
from watcher._i18n import _, _LE from watcher._i18n import _
from watcher import conf from watcher import conf
@ -83,9 +83,9 @@ class WatcherException(Exception):
except Exception: except Exception:
# kwargs doesn't match a variable in msg_fmt # kwargs doesn't match a variable in msg_fmt
# log the issue and the kwargs # log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation')) LOG.exception('Exception in string format operation')
for name, value in kwargs.items(): for name, value in kwargs.items():
LOG.error(_LE("%(name)s: %(value)s"), LOG.error("%(name)s: %(value)s",
{'name': name, 'value': value}) {'name': name, 'value': value})
if CONF.fatal_exception_format_errors: if CONF.fatal_exception_format_errors:

View File

@ -17,7 +17,6 @@ from oslo_config import cfg
from oslo_log import log from oslo_log import log
import oslo_messaging as messaging import oslo_messaging as messaging
from watcher._i18n import _LE
from watcher.common import context as watcher_context from watcher.common import context as watcher_context
from watcher.common import exception from watcher.common import exception
@ -74,7 +73,7 @@ def initialized():
def cleanup(): def cleanup():
global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER
if NOTIFIER is None: if NOTIFIER is None:
LOG.exception(_LE("RPC cleanup: NOTIFIER is None")) LOG.exception("RPC cleanup: NOTIFIER is None")
TRANSPORT.cleanup() TRANSPORT.cleanup()
NOTIFICATION_TRANSPORT.cleanup() NOTIFICATION_TRANSPORT.cleanup()
TRANSPORT = NOTIFICATION_TRANSPORT = NOTIFIER = None TRANSPORT = NOTIFICATION_TRANSPORT = NOTIFIER = None

View File

@ -25,7 +25,6 @@ from oslo_utils import timeutils
from oslo_utils import uuidutils from oslo_utils import uuidutils
import six import six
from watcher._i18n import _LW
from watcher.common import exception from watcher.common import exception
from watcher import conf from watcher import conf
@ -73,9 +72,9 @@ def safe_rstrip(value, chars=None):
""" """
if not isinstance(value, six.string_types): if not isinstance(value, six.string_types):
LOG.warning(_LW( LOG.warning(
"Failed to remove trailing character. Returning original object." "Failed to remove trailing character. Returning original object."
"Supplied object is not a string: %s,"), value) "Supplied object is not a string: %s,", value)
return value return value
return value.rstrip(chars) or value return value.rstrip(chars) or value

View File

@ -27,7 +27,7 @@ from oslo_utils import strutils
import prettytable as ptable import prettytable as ptable
from six.moves import input from six.moves import input
from watcher._i18n import _, _LI from watcher._i18n import _
from watcher._i18n import lazy_translation_enabled from watcher._i18n import lazy_translation_enabled
from watcher.common import context from watcher.common import context
from watcher.common import exception from watcher.common import exception
@ -231,7 +231,7 @@ class PurgeCommand(object):
if action.action_plan_id not in action_plan_ids] if action.action_plan_id not in action_plan_ids]
LOG.debug("Orphans found:\n%s", orphans) LOG.debug("Orphans found:\n%s", orphans)
LOG.info(_LI("Orphans found:\n%s"), orphans.get_count_table()) LOG.info("Orphans found:\n%s", orphans.get_count_table())
return orphans return orphans
@ -403,13 +403,13 @@ class PurgeCommand(object):
return to_be_deleted return to_be_deleted
def do_delete(self): def do_delete(self):
LOG.info(_LI("Deleting...")) LOG.info("Deleting...")
# Reversed to avoid errors with foreign keys # Reversed to avoid errors with foreign keys
for entry in reversed(list(self._objects_map)): for entry in reversed(list(self._objects_map)):
entry.destroy() entry.destroy()
def execute(self): def execute(self):
LOG.info(_LI("Starting purge command")) LOG.info("Starting purge command")
self._objects_map = self.find_objects_to_delete() self._objects_map = self.find_objects_to_delete()
if (self.max_number is not None and if (self.max_number is not None and
@ -424,15 +424,15 @@ class PurgeCommand(object):
if not self.dry_run and self.confirmation_prompt(): if not self.dry_run and self.confirmation_prompt():
self.do_delete() self.do_delete()
print(_("Purge results summary%s:") % _orphans_note) print(_("Purge results summary%s:") % _orphans_note)
LOG.info(_LI("Purge results summary%s:"), _orphans_note) LOG.info("Purge results summary%s:", _orphans_note)
else: else:
LOG.debug(self._objects_map) LOG.debug(self._objects_map)
print(_("Here below is a table containing the objects " print(_("Here below is a table containing the objects "
"that can be purged%s:") % _orphans_note) "that can be purged%s:") % _orphans_note)
LOG.info(_LI("\n%s"), self._objects_map.get_count_table()) LOG.info("\n%s", self._objects_map.get_count_table())
print(self._objects_map.get_count_table()) print(self._objects_map.get_count_table())
LOG.info(_LI("Purge process completed")) LOG.info("Purge process completed")
def purge(age_in_days, max_number, goal, exclude_orphans, dry_run): def purge(age_in_days, max_number, goal, exclude_orphans, dry_run):
@ -457,11 +457,11 @@ def purge(age_in_days, max_number, goal, exclude_orphans, dry_run):
if max_number and max_number < 0: if max_number and max_number < 0:
raise exception.NegativeLimitError raise exception.NegativeLimitError
LOG.info(_LI("[options] age_in_days = %s"), age_in_days) LOG.info("[options] age_in_days = %s", age_in_days)
LOG.info(_LI("[options] max_number = %s"), max_number) LOG.info("[options] max_number = %s", max_number)
LOG.info(_LI("[options] goal = %s"), goal) LOG.info("[options] goal = %s", goal)
LOG.info(_LI("[options] exclude_orphans = %s"), exclude_orphans) LOG.info("[options] exclude_orphans = %s", exclude_orphans)
LOG.info(_LI("[options] dry_run = %s"), dry_run) LOG.info("[options] dry_run = %s", dry_run)
uuid = PurgeCommand.get_goal_uuid(goal) uuid = PurgeCommand.get_goal_uuid(goal)

View File

@ -17,7 +17,6 @@
# limitations under the License. # limitations under the License.
from oslo_log import log from oslo_log import log
from watcher._i18n import _LI, _LW
from watcher.common import exception from watcher.common import exception
from watcher.common import nova_helper from watcher.common import nova_helper
from watcher.decision_engine.model import element from watcher.decision_engine.model import element
@ -44,8 +43,8 @@ class NovaNotification(base.NotificationEndpoint):
if node_uuid: if node_uuid:
self.get_or_create_node(node_uuid) self.get_or_create_node(node_uuid)
except exception.ComputeNodeNotFound: except exception.ComputeNodeNotFound:
LOG.warning(_LW("Could not find compute node %(node)s for " LOG.warning("Could not find compute node %(node)s for "
"instance %(instance)s"), "instance %(instance)s",
dict(node=node_uuid, instance=instance_uuid)) dict(node=node_uuid, instance=instance_uuid))
try: try:
instance = self.cluster_data_model.get_instance_by_uuid( instance = self.cluster_data_model.get_instance_by_uuid(
@ -201,7 +200,7 @@ class NovaNotification(base.NotificationEndpoint):
try: try:
self.cluster_data_model.delete_instance(instance, node) self.cluster_data_model.delete_instance(instance, node)
except Exception: except Exception:
LOG.info(_LI("Instance %s already deleted"), instance.uuid) LOG.info("Instance %s already deleted", instance.uuid)
class VersionedNotificationEndpoint(NovaNotification): class VersionedNotificationEndpoint(NovaNotification):
@ -225,8 +224,8 @@ class ServiceUpdated(VersionedNotificationEndpoint):
def info(self, ctxt, publisher_id, event_type, payload, metadata): def info(self, ctxt, publisher_id, event_type, payload, metadata):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info(_LI("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s") % "with metadata %(metadata)s" %
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))
@ -269,8 +268,8 @@ class InstanceCreated(VersionedNotificationEndpoint):
def info(self, ctxt, publisher_id, event_type, payload, metadata): def info(self, ctxt, publisher_id, event_type, payload, metadata):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info(_LI("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s") % "with metadata %(metadata)s" %
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))
@ -304,8 +303,8 @@ class InstanceUpdated(VersionedNotificationEndpoint):
def info(self, ctxt, publisher_id, event_type, payload, metadata): def info(self, ctxt, publisher_id, event_type, payload, metadata):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info(_LI("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s") % "with metadata %(metadata)s" %
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))
@ -331,8 +330,8 @@ class InstanceDeletedEnd(VersionedNotificationEndpoint):
def info(self, ctxt, publisher_id, event_type, payload, metadata): def info(self, ctxt, publisher_id, event_type, payload, metadata):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info(_LI("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s") % "with metadata %(metadata)s" %
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))
@ -366,8 +365,8 @@ class LegacyInstanceUpdated(UnversionedNotificationEndpoint):
def info(self, ctxt, publisher_id, event_type, payload, metadata): def info(self, ctxt, publisher_id, event_type, payload, metadata):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info(_LI("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s") % "with metadata %(metadata)s" %
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))
@ -393,8 +392,8 @@ class LegacyInstanceCreatedEnd(UnversionedNotificationEndpoint):
def info(self, ctxt, publisher_id, event_type, payload, metadata): def info(self, ctxt, publisher_id, event_type, payload, metadata):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info(_LI("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s") % "with metadata %(metadata)s" %
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))
@ -420,8 +419,8 @@ class LegacyInstanceDeletedEnd(UnversionedNotificationEndpoint):
def info(self, ctxt, publisher_id, event_type, payload, metadata): def info(self, ctxt, publisher_id, event_type, payload, metadata):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info(_LI("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s") % "with metadata %(metadata)s" %
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))
@ -453,8 +452,8 @@ class LegacyLiveMigratedEnd(UnversionedNotificationEndpoint):
def info(self, ctxt, publisher_id, event_type, payload, metadata): def info(self, ctxt, publisher_id, event_type, payload, metadata):
ctxt.request_id = metadata['message_id'] ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type ctxt.project_domain = event_type
LOG.info(_LI("Event '%(event)s' received from %(publisher)s " LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s") % "with metadata %(metadata)s" %
dict(event=event_type, dict(event=event_type,
publisher=publisher_id, publisher=publisher_id,
metadata=metadata)) metadata=metadata))

View File

@ -22,7 +22,6 @@ from oslo_config import cfg
from oslo_config import types from oslo_config import types
from oslo_log import log from oslo_log import log
from watcher._i18n import _LW
from watcher.common import utils from watcher.common import utils
from watcher.decision_engine.planner import base from watcher.decision_engine.planner import base
from watcher import objects from watcher import objects
@ -152,7 +151,7 @@ class WeightPlanner(base.BasePlanner):
context, action_plan.id, solution.efficacy_indicators) context, action_plan.id, solution.efficacy_indicators)
if len(action_graph.nodes()) == 0: if len(action_graph.nodes()) == 0:
LOG.warning(_LW("The action plan is empty")) LOG.warning("The action plan is empty")
action_plan.state = objects.action_plan.State.SUCCEEDED action_plan.state = objects.action_plan.State.SUCCEEDED
action_plan.save() action_plan.save()

View File

@ -20,7 +20,6 @@ from oslo_config import cfg
from oslo_config import types from oslo_config import types
from oslo_log import log from oslo_log import log
from watcher._i18n import _LW
from watcher.common import clients from watcher.common import clients
from watcher.common import exception from watcher.common import exception
from watcher.common import nova_helper from watcher.common import nova_helper
@ -117,7 +116,7 @@ class WorkloadStabilizationPlanner(base.BasePlanner):
scheduled = sorted(to_schedule, key=lambda weight: (weight[0]), scheduled = sorted(to_schedule, key=lambda weight: (weight[0]),
reverse=True) reverse=True)
if len(scheduled) == 0: if len(scheduled) == 0:
LOG.warning(_LW("The action plan is empty")) LOG.warning("The action plan is empty")
action_plan.state = objects.action_plan.State.SUCCEEDED action_plan.state = objects.action_plan.State.SUCCEEDED
action_plan.save() action_plan.save()
else: else:

View File

@ -16,7 +16,6 @@
from oslo_log import log from oslo_log import log
from watcher._i18n import _LW
from watcher.common import exception from watcher.common import exception
from watcher.common import nova_helper from watcher.common import nova_helper
from watcher.decision_engine.scope import base from watcher.decision_engine.scope import base
@ -170,9 +169,9 @@ class DefaultScope(base.BaseScope):
node_name = cluster_model.get_node_by_instance_uuid( node_name = cluster_model.get_node_by_instance_uuid(
instance_uuid).uuid instance_uuid).uuid
except exception.ComputeResourceNotFound: except exception.ComputeResourceNotFound:
LOG.warning(_LW("The following instance %s cannot be found. " LOG.warning("The following instance %s cannot be found. "
"It might be deleted from CDM along with node" "It might be deleted from CDM along with node"
" instance was hosted on."), " instance was hosted on.",
instance_uuid) instance_uuid)
continue continue
self.remove_instance( self.remove_instance(

View File

@ -38,7 +38,7 @@ migration is possible on your OpenStack cluster.
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log from oslo_log import log
from watcher._i18n import _, _LE, _LI, _LW from watcher._i18n import _
from watcher.common import exception from watcher.common import exception
from watcher.datasource import ceilometer as ceil from watcher.datasource import ceilometer as ceil
from watcher.datasource import monasca as mon from watcher.datasource import monasca as mon
@ -319,11 +319,11 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
if host_avg_cpu_util is None: if host_avg_cpu_util is None:
resource_id = "%s_%s" % (node.uuid, node.hostname) resource_id = "%s_%s" % (node.uuid, node.hostname)
LOG.error( LOG.error(
_LE("No values returned by %(resource_id)s " "No values returned by %(resource_id)s "
"for %(metric_name)s") % dict( "for %(metric_name)s" % dict(
resource_id=resource_id, resource_id=resource_id,
metric_name=self.METRIC_NAMES[ metric_name=self.METRIC_NAMES[
self.config.datasource]['host_cpu_usage'])) self.config.datasource]['host_cpu_usage']))
host_avg_cpu_util = 100 host_avg_cpu_util = 100
total_cores_used = node.vcpus * (host_avg_cpu_util / 100.0) total_cores_used = node.vcpus * (host_avg_cpu_util / 100.0)
@ -339,11 +339,11 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
instance_cpu_utilization = self.get_instance_cpu_usage(instance) instance_cpu_utilization = self.get_instance_cpu_usage(instance)
if instance_cpu_utilization is None: if instance_cpu_utilization is None:
LOG.error( LOG.error(
_LE("No values returned by %(resource_id)s " "No values returned by %(resource_id)s "
"for %(metric_name)s") % dict( "for %(metric_name)s" % dict(
resource_id=instance.uuid, resource_id=instance.uuid,
metric_name=self.METRIC_NAMES[ metric_name=self.METRIC_NAMES[
self.config.datasource]['instance_cpu_usage'])) self.config.datasource]['instance_cpu_usage']))
instance_cpu_utilization = 100 instance_cpu_utilization = 100
total_cores_used = instance.vcpus * (instance_cpu_utilization / 100.0) total_cores_used = instance.vcpus * (instance_cpu_utilization / 100.0)
@ -439,7 +439,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
return unsuccessful_migration + 1 return unsuccessful_migration + 1
def pre_execute(self): def pre_execute(self):
LOG.info(_LI("Initializing Server Consolidation")) LOG.info("Initializing Server Consolidation")
if not self.compute_model: if not self.compute_model:
raise exception.ClusterStateNotDefined() raise exception.ClusterStateNotDefined()
@ -461,9 +461,9 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
LOG.debug("Compute node(s) BFD %s", sorted_scores) LOG.debug("Compute node(s) BFD %s", sorted_scores)
# Get Node to be released # Get Node to be released
if len(scores) == 0: if len(scores) == 0:
LOG.warning(_LW( LOG.warning(
"The workloads of the compute nodes" "The workloads of the compute nodes"
" of the cluster is zero")) " of the cluster is zero")
return return
while sorted_scores and ( while sorted_scores and (

View File

@ -30,7 +30,7 @@ telemetries to measure thermal/workload status of server.
from oslo_log import log from oslo_log import log
from watcher._i18n import _, _LW, _LI from watcher._i18n import _
from watcher.common import exception as wexc from watcher.common import exception as wexc
from watcher.datasource import ceilometer as ceil from watcher.datasource import ceilometer as ceil
from watcher.decision_engine.model import element from watcher.decision_engine.model import element
@ -153,7 +153,7 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
aggregate='avg') aggregate='avg')
# some hosts may not have outlet temp meters, remove from target # some hosts may not have outlet temp meters, remove from target
if outlet_temp is None: if outlet_temp is None:
LOG.warning(_LW("%s: no outlet temp data"), resource_id) LOG.warning("%s: no outlet temp data", resource_id)
continue continue
LOG.debug("%s: outlet temperature %f" % (resource_id, outlet_temp)) LOG.debug("%s: outlet temperature %f" % (resource_id, outlet_temp))
@ -176,13 +176,13 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
# select the first active instance to migrate # select the first active instance to migrate
if (instance.state != if (instance.state !=
element.InstanceState.ACTIVE.value): element.InstanceState.ACTIVE.value):
LOG.info(_LI("Instance not active, skipped: %s"), LOG.info("Instance not active, skipped: %s",
instance.uuid) instance.uuid)
continue continue
return mig_source_node, instance return mig_source_node, instance
except wexc.InstanceNotFound as e: except wexc.InstanceNotFound as e:
LOG.exception(e) LOG.exception(e)
LOG.info(_LI("Instance not found")) LOG.info("Instance not found")
return None return None
@ -233,7 +233,7 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
return self.solution return self.solution
if len(hosts_target) == 0: if len(hosts_target) == 0:
LOG.warning(_LW("No hosts under outlet temp threshold found")) LOG.warning("No hosts under outlet temp threshold found")
return self.solution return self.solution
# choose the server with highest outlet t # choose the server with highest outlet t
@ -254,7 +254,7 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
if len(dest_servers) == 0: if len(dest_servers) == 0:
# TODO(zhenzanz): maybe to warn that there's no resource # TODO(zhenzanz): maybe to warn that there's no resource
# for instance. # for instance.
LOG.info(_LI("No proper target host could be found")) LOG.info("No proper target host could be found")
return self.solution return self.solution
dest_servers = sorted(dest_servers, key=lambda x: (x["outlet_temp"])) dest_servers = sorted(dest_servers, key=lambda x: (x["outlet_temp"]))

View File

@ -45,7 +45,7 @@ airflow is higher than the specified threshold.
from oslo_log import log from oslo_log import log
from watcher._i18n import _, _LI, _LW from watcher._i18n import _
from watcher.common import exception as wexc from watcher.common import exception as wexc
from watcher.datasource import ceilometer as ceil from watcher.datasource import ceilometer as ceil
from watcher.decision_engine.model import element from watcher.decision_engine.model import element
@ -210,13 +210,13 @@ class UniformAirflow(base.BaseStrategy):
if (instance.state != if (instance.state !=
element.InstanceState.ACTIVE.value): element.InstanceState.ACTIVE.value):
LOG.info( LOG.info(
_LI("Instance not active, skipped: %s"), "Instance not active, skipped: %s",
instance.uuid) instance.uuid)
continue continue
instances_tobe_migrate.append(instance) instances_tobe_migrate.append(instance)
return source_node, instances_tobe_migrate return source_node, instances_tobe_migrate
else: else:
LOG.info(_LI("Instance not found on node: %s"), LOG.info("Instance not found on node: %s",
source_node.uuid) source_node.uuid)
def filter_destination_hosts(self, hosts, instances_to_migrate): def filter_destination_hosts(self, hosts, instances_to_migrate):
@ -257,8 +257,8 @@ class UniformAirflow(base.BaseStrategy):
break break
# check if all instances have target hosts # check if all instances have target hosts
if len(destination_hosts) != len(instances_to_migrate): if len(destination_hosts) != len(instances_to_migrate):
LOG.warning(_LW("Not all target hosts could be found; it might " LOG.warning("Not all target hosts could be found; it might "
"be because there is not enough resource")) "be because there is not enough resource")
return None return None
return destination_hosts return destination_hosts
@ -281,7 +281,7 @@ class UniformAirflow(base.BaseStrategy):
aggregate='avg') aggregate='avg')
# some hosts may not have airflow meter, remove from target # some hosts may not have airflow meter, remove from target
if airflow is None: if airflow is None:
LOG.warning(_LW("%s: no airflow data"), resource_id) LOG.warning("%s: no airflow data", resource_id)
continue continue
LOG.debug("%s: airflow %f" % (resource_id, airflow)) LOG.debug("%s: airflow %f" % (resource_id, airflow))
@ -316,9 +316,9 @@ class UniformAirflow(base.BaseStrategy):
return self.solution return self.solution
if not target_nodes: if not target_nodes:
LOG.warning(_LW("No hosts currently have airflow under %s, " LOG.warning("No hosts currently have airflow under %s, "
"therefore there are no possible target " "therefore there are no possible target "
"hosts for any migration"), "hosts for any migration",
self.threshold_airflow) self.threshold_airflow)
return self.solution return self.solution
@ -337,8 +337,8 @@ class UniformAirflow(base.BaseStrategy):
destination_hosts = self.filter_destination_hosts( destination_hosts = self.filter_destination_hosts(
target_nodes, instances_src) target_nodes, instances_src)
if not destination_hosts: if not destination_hosts:
LOG.warning(_LW("No target host could be found; it might " LOG.warning("No target host could be found; it might "
"be because there is not enough resources")) "be because there is not enough resources")
return self.solution return self.solution
# generate solution to migrate the instance to the dest server, # generate solution to migrate the instance to the dest server,
for info in destination_hosts: for info in destination_hosts:

View File

@ -56,7 +56,7 @@ an active compute node to any other active compute node.
from oslo_log import log from oslo_log import log
import six import six
from watcher._i18n import _, _LE, _LI from watcher._i18n import _
from watcher.common import exception from watcher.common import exception
from watcher.datasource import ceilometer as ceil from watcher.datasource import ceilometer as ceil
from watcher.decision_engine.model import element from watcher.decision_engine.model import element
@ -107,10 +107,10 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
elif isinstance(state, (element.InstanceState, element.ServiceState)): elif isinstance(state, (element.InstanceState, element.ServiceState)):
return state.value return state.value
else: else:
LOG.error(_LE('Unexpexted resource state type, ' LOG.error('Unexpected resource state type, '
'state=%(state)s, state_type=%(st)s.') % dict( 'state=%(state)s, state_type=%(st)s.' %
state=state, dict(state=state,
st=type(state))) st=type(state)))
raise exception.WatcherException raise exception.WatcherException
def add_action_enable_compute_node(self, node): def add_action_enable_compute_node(self, node):
@ -154,10 +154,10 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
# When supported, the cold migration may be used as a fallback # When supported, the cold migration may be used as a fallback
# migration mechanism to move non active VMs. # migration mechanism to move non active VMs.
LOG.error( LOG.error(
_LE('Cannot live migrate: instance_uuid=%(instance_uuid)s, ' 'Cannot live migrate: instance_uuid=%(instance_uuid)s, '
'state=%(instance_state)s.') % dict( 'state=%(instance_state)s.' % dict(
instance_uuid=instance.uuid, instance_uuid=instance.uuid,
instance_state=instance_state_str)) instance_state=instance_state_str))
return return
migration_type = 'live' migration_type = 'live'
@ -229,8 +229,8 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
if not instance_ram_util or not instance_disk_util: if not instance_ram_util or not instance_disk_util:
LOG.error( LOG.error(
_LE('No values returned by %s for memory.usage ' 'No values returned by %s for memory.usage '
'or disk.root.size'), instance.uuid) 'or disk.root.size', instance.uuid)
raise exception.NoDataFound raise exception.NoDataFound
self.ceilometer_instance_data_cache[instance.uuid] = dict( self.ceilometer_instance_data_cache[instance.uuid] = dict(
@ -475,7 +475,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
:param original_model: root_model object :param original_model: root_model object
""" """
LOG.info(_LI('Executing Smart Strategy')) LOG.info('Executing Smart Strategy')
rcu = self.get_relative_cluster_utilization() rcu = self.get_relative_cluster_utilization()
cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0}

View File

@ -49,7 +49,7 @@ hosts nodes.
from oslo_log import log from oslo_log import log
from watcher._i18n import _, _LE, _LI, _LW from watcher._i18n import _
from watcher.common import exception as wexc from watcher.common import exception as wexc
from watcher.datasource import ceilometer as ceil from watcher.datasource import ceilometer as ceil
from watcher.decision_engine.model import element from watcher.decision_engine.model import element
@ -187,14 +187,14 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
min_delta = current_delta min_delta = current_delta
instance_id = instance.uuid instance_id = instance.uuid
except wexc.InstanceNotFound: except wexc.InstanceNotFound:
LOG.error(_LE("Instance not found; error: %s"), LOG.error("Instance not found; error: %s",
instance_id) instance_id)
if instance_id: if instance_id:
return (source_node, return (source_node,
self.compute_model.get_instance_by_uuid( self.compute_model.get_instance_by_uuid(
instance_id)) instance_id))
else: else:
LOG.info(_LI("VM not found from node: %s"), LOG.info("VM not found from node: %s",
source_node.uuid) source_node.uuid)
def filter_destination_hosts(self, hosts, instance_to_migrate, def filter_destination_hosts(self, hosts, instance_to_migrate,
@ -259,7 +259,7 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
aggregate='avg') aggregate='avg')
except Exception as exc: except Exception as exc:
LOG.exception(exc) LOG.exception(exc)
LOG.error(_LE("Can not get cpu_util from Ceilometer")) LOG.error("Can not get cpu_util from Ceilometer")
continue continue
if cpu_util is None: if cpu_util is None:
LOG.debug("Instance (%s): cpu_util is None", instance.uuid) LOG.debug("Instance (%s): cpu_util is None", instance.uuid)
@ -289,7 +289,7 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
This can be used to fetch some pre-requisites or data. This can be used to fetch some pre-requisites or data.
""" """
LOG.info(_LI("Initializing Workload Balance Strategy")) LOG.info("Initializing Workload Balance Strategy")
if not self.compute_model: if not self.compute_model:
raise wexc.ClusterStateNotDefined() raise wexc.ClusterStateNotDefined()
@ -314,9 +314,9 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
return self.solution return self.solution
if not target_nodes: if not target_nodes:
LOG.warning(_LW("No hosts current have CPU utilization under %s " LOG.warning("No hosts current have CPU utilization under %s "
"percent, therefore there are no possible target " "percent, therefore there are no possible target "
"hosts for any migration"), "hosts for any migration",
self.threshold) self.threshold)
return self.solution return self.solution
@ -337,8 +337,8 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
# pick up the lowest one as dest server # pick up the lowest one as dest server
if not destination_hosts: if not destination_hosts:
# for instance. # for instance.
LOG.warning(_LW("No proper target host could be found, it might " LOG.warning("No proper target host could be found, it might "
"be because of there's no enough CPU/Memory/DISK")) "be because of there's no enough CPU/Memory/DISK")
return self.solution return self.solution
destination_hosts = sorted(destination_hosts, destination_hosts = sorted(destination_hosts,
key=lambda x: (x["cpu_util"])) key=lambda x: (x["cpu_util"]))

View File

@ -38,7 +38,7 @@ from oslo_config import cfg
from oslo_log import log from oslo_log import log
import oslo_utils import oslo_utils
from watcher._i18n import _LI, _LW, _ from watcher._i18n import _
from watcher.common import exception from watcher.common import exception
from watcher.datasource import ceilometer as ceil from watcher.datasource import ceilometer as ceil
from watcher.decision_engine.model import element from watcher.decision_engine.model import element
@ -202,10 +202,9 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
) )
if avg_meter is None: if avg_meter is None:
LOG.warning( LOG.warning(
_LW("No values returned by %(resource_id)s " "No values returned by %(resource_id)s "
"for %(metric_name)s") % dict( "for %(metric_name)s" % dict(
resource_id=instance.uuid, resource_id=instance.uuid, metric_name=meter))
metric_name=meter))
avg_meter = 0 avg_meter = 0
if meter == 'cpu_util': if meter == 'cpu_util':
avg_meter /= float(100) avg_meter /= float(100)
@ -399,7 +398,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
return self.solution return self.solution
def pre_execute(self): def pre_execute(self):
LOG.info(_LI("Initializing Workload Stabilization")) LOG.info("Initializing Workload Stabilization")
if not self.compute_model: if not self.compute_model:
raise exception.ClusterStateNotDefined() raise exception.ClusterStateNotDefined()

View File

@ -19,7 +19,6 @@ import collections
from oslo_log import log from oslo_log import log
from watcher._i18n import _LI, _LW
from watcher.common import context from watcher.common import context
from watcher.decision_engine.loading import default from watcher.decision_engine.loading import default
from watcher.decision_engine.scoring import scoring_factory from watcher.decision_engine.scoring import scoring_factory
@ -136,7 +135,7 @@ class Syncer(object):
for goal_name, goal_map in goals_map.items(): for goal_name, goal_map in goals_map.items():
if goal_map in self.available_goals_map: if goal_map in self.available_goals_map:
LOG.info(_LI("Goal %s already exists"), goal_name) LOG.info("Goal %s already exists", goal_name)
continue continue
self.goal_mapping.update(self._sync_goal(goal_map)) self.goal_mapping.update(self._sync_goal(goal_map))
@ -145,14 +144,14 @@ class Syncer(object):
if (strategy_map in self.available_strategies_map and if (strategy_map in self.available_strategies_map and
strategy_map.goal_name not in strategy_map.goal_name not in
[g.name for g in self.goal_mapping.values()]): [g.name for g in self.goal_mapping.values()]):
LOG.info(_LI("Strategy %s already exists"), strategy_name) LOG.info("Strategy %s already exists", strategy_name)
continue continue
self.strategy_mapping.update(self._sync_strategy(strategy_map)) self.strategy_mapping.update(self._sync_strategy(strategy_map))
for se_name, se_map in scoringengines_map.items(): for se_name, se_map in scoringengines_map.items():
if se_map in self.available_scoringengines_map: if se_map in self.available_scoringengines_map:
LOG.info(_LI("Scoring Engine %s already exists"), LOG.info("Scoring Engine %s already exists",
se_name) se_name)
continue continue
@ -177,7 +176,7 @@ class Syncer(object):
indicator._asdict() indicator._asdict()
for indicator in goal_map.efficacy_specification] for indicator in goal_map.efficacy_specification]
goal.create() goal.create()
LOG.info(_LI("Goal %s created"), goal_name) LOG.info("Goal %s created", goal_name)
# Updating the internal states # Updating the internal states
self.available_goals_map[goal] = goal_map self.available_goals_map[goal] = goal_map
@ -208,7 +207,7 @@ class Syncer(object):
strategy.goal_id = objects.Goal.get_by_name(self.ctx, goal_name).id strategy.goal_id = objects.Goal.get_by_name(self.ctx, goal_name).id
strategy.parameters_spec = parameters_spec strategy.parameters_spec = parameters_spec
strategy.create() strategy.create()
LOG.info(_LI("Strategy %s created"), strategy_name) LOG.info("Strategy %s created", strategy_name)
# Updating the internal states # Updating the internal states
self.available_strategies_map[strategy] = strategy_map self.available_strategies_map[strategy] = strategy_map
@ -233,7 +232,7 @@ class Syncer(object):
scoringengine.description = scoringengine_map.description scoringengine.description = scoringengine_map.description
scoringengine.metainfo = scoringengine_map.metainfo scoringengine.metainfo = scoringengine_map.metainfo
scoringengine.create() scoringengine.create()
LOG.info(_LI("Scoring Engine %s created"), scoringengine_name) LOG.info("Scoring Engine %s created", scoringengine_name)
# Updating the internal states # Updating the internal states
self.available_scoringengines_map[scoringengine] = \ self.available_scoringengines_map[scoringengine] = \
@ -270,17 +269,17 @@ class Syncer(object):
# and soft delete stale audits and action plans # and soft delete stale audits and action plans
for stale_audit_template in self.stale_audit_templates_map.values(): for stale_audit_template in self.stale_audit_templates_map.values():
stale_audit_template.save() stale_audit_template.save()
LOG.info(_LI("Audit Template '%s' synced"), LOG.info("Audit Template '%s' synced",
stale_audit_template.name) stale_audit_template.name)
for stale_audit in self.stale_audits_map.values(): for stale_audit in self.stale_audits_map.values():
stale_audit.save() stale_audit.save()
LOG.info(_LI("Stale audit '%s' synced and cancelled"), LOG.info("Stale audit '%s' synced and cancelled",
stale_audit.uuid) stale_audit.uuid)
for stale_action_plan in self.stale_action_plans_map.values(): for stale_action_plan in self.stale_action_plans_map.values():
stale_action_plan.save() stale_action_plan.save()
LOG.info(_LI("Stale action plan '%s' synced and cancelled"), LOG.info("Stale action plan '%s' synced and cancelled",
stale_action_plan.uuid) stale_action_plan.uuid)
def _find_stale_audit_templates_due_to_goal(self): def _find_stale_audit_templates_due_to_goal(self):
@ -395,15 +394,15 @@ class Syncer(object):
invalid_ats = objects.AuditTemplate.list(self.ctx, filters=filters) invalid_ats = objects.AuditTemplate.list(self.ctx, filters=filters)
for at in invalid_ats: for at in invalid_ats:
LOG.warning( LOG.warning(
_LW("Audit Template '%(audit_template)s' references a " "Audit Template '%(audit_template)s' references a "
"goal that does not exist"), audit_template=at.uuid) "goal that does not exist", audit_template=at.uuid)
stale_audits = objects.Audit.list( stale_audits = objects.Audit.list(
self.ctx, filters=filters, eager=True) self.ctx, filters=filters, eager=True)
for audit in stale_audits: for audit in stale_audits:
LOG.warning( LOG.warning(
_LW("Audit '%(audit)s' references a " "Audit '%(audit)s' references a "
"goal that does not exist"), audit=audit.uuid) "goal that does not exist", audit=audit.uuid)
if audit.id not in self.stale_audits_map: if audit.id not in self.stale_audits_map:
audit.state = objects.audit.State.CANCELLED audit.state = objects.audit.State.CANCELLED
self.stale_audits_map[audit.id] = audit self.stale_audits_map[audit.id] = audit
@ -422,8 +421,8 @@ class Syncer(object):
invalid_ats = objects.AuditTemplate.list(self.ctx, filters=filters) invalid_ats = objects.AuditTemplate.list(self.ctx, filters=filters)
for at in invalid_ats: for at in invalid_ats:
LOG.info( LOG.info(
_LI("Audit Template '%(audit_template)s' references a " "Audit Template '%(audit_template)s' references a "
"strategy that does not exist"), "strategy that does not exist",
audit_template=at.uuid) audit_template=at.uuid)
# In this case we can reset the strategy ID to None # In this case we can reset the strategy ID to None
# so the audit template can still achieve the same goal # so the audit template can still achieve the same goal
@ -438,8 +437,8 @@ class Syncer(object):
self.ctx, filters=filters, eager=True) self.ctx, filters=filters, eager=True)
for audit in stale_audits: for audit in stale_audits:
LOG.warning( LOG.warning(
_LW("Audit '%(audit)s' references a " "Audit '%(audit)s' references a "
"strategy that does not exist"), audit=audit.uuid) "strategy that does not exist", audit=audit.uuid)
if audit.id not in self.stale_audits_map: if audit.id not in self.stale_audits_map:
audit.state = objects.audit.State.CANCELLED audit.state = objects.audit.State.CANCELLED
self.stale_audits_map[audit.id] = audit self.stale_audits_map[audit.id] = audit
@ -451,8 +450,8 @@ class Syncer(object):
self.ctx, filters=filters, eager=True) self.ctx, filters=filters, eager=True)
for action_plan in stale_action_plans: for action_plan in stale_action_plans:
LOG.warning( LOG.warning(
_LW("Action Plan '%(action_plan)s' references a " "Action Plan '%(action_plan)s' references a "
"strategy that does not exist"), "strategy that does not exist",
action_plan=action_plan.uuid) action_plan=action_plan.uuid)
if action_plan.id not in self.stale_action_plans_map: if action_plan.id not in self.stale_action_plans_map:
action_plan.state = objects.action_plan.State.CANCELLED action_plan.state = objects.action_plan.State.CANCELLED
@ -467,7 +466,7 @@ class Syncer(object):
se for se in self.available_scoringengines se for se in self.available_scoringengines
if se.name not in self.discovered_map['scoringengines']] if se.name not in self.discovered_map['scoringengines']]
for se in removed_se: for se in removed_se:
LOG.info(_LI("Scoring Engine %s removed"), se.name) LOG.info("Scoring Engine %s removed", se.name)
se.soft_delete() se.soft_delete()
def _discover(self): def _discover(self):
@ -526,9 +525,9 @@ class Syncer(object):
for matching_goal in matching_goals: for matching_goal in matching_goals:
if (matching_goal.efficacy_specification == goal_efficacy_spec and if (matching_goal.efficacy_specification == goal_efficacy_spec and
matching_goal.display_name == goal_display_name): matching_goal.display_name == goal_display_name):
LOG.info(_LI("Goal %s unchanged"), goal_name) LOG.info("Goal %s unchanged", goal_name)
else: else:
LOG.info(_LI("Goal %s modified"), goal_name) LOG.info("Goal %s modified", goal_name)
matching_goal.soft_delete() matching_goal.soft_delete()
stale_goals.append(matching_goal) stale_goals.append(matching_goal)
@ -545,9 +544,9 @@ class Syncer(object):
matching_strategy.goal_id not in self.goal_mapping and matching_strategy.goal_id not in self.goal_mapping and
matching_strategy.parameters_spec == matching_strategy.parameters_spec ==
ast.literal_eval(parameters_spec)): ast.literal_eval(parameters_spec)):
LOG.info(_LI("Strategy %s unchanged"), strategy_name) LOG.info("Strategy %s unchanged", strategy_name)
else: else:
LOG.info(_LI("Strategy %s modified"), strategy_name) LOG.info("Strategy %s modified", strategy_name)
matching_strategy.soft_delete() matching_strategy.soft_delete()
stale_strategies.append(matching_strategy) stale_strategies.append(matching_strategy)
@ -563,9 +562,9 @@ class Syncer(object):
for matching_scoringengine in matching_scoringengines: for matching_scoringengine in matching_scoringengines:
if (matching_scoringengine.description == se_description and if (matching_scoringengine.description == se_description and
matching_scoringengine.metainfo == se_metainfo): matching_scoringengine.metainfo == se_metainfo):
LOG.info(_LI("Scoring Engine %s unchanged"), se_name) LOG.info("Scoring Engine %s unchanged", se_name)
else: else:
LOG.info(_LI("Scoring Engine %s modified"), se_name) LOG.info("Scoring Engine %s modified", se_name)
matching_scoringengine.soft_delete() matching_scoringengine.soft_delete()
stale_scoringengines.append(matching_scoringengine) stale_scoringengines.append(matching_scoringengine)