From f605888e323a352feb25f19c22ff7632b7d101e3 Mon Sep 17 00:00:00 2001 From: yanxubin Date: Wed, 22 Mar 2017 16:01:23 +0800 Subject: [PATCH] Remove log translations Log messages are no longer being translated. This removes all use of the _LC, _LE, _LI, and _LW translation markers to simplify logging and to avoid confusion with new contributions. See: http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html Change-Id: I3552767976807a9851af69b1fa4f86ac25943025 --- tox.ini | 2 +- watcher/api/controllers/v1/service.py | 7 ++- watcher/api/middleware/parsable_error.py | 4 +- watcher/applier/actions/migration.py | 14 ++--- watcher/applier/actions/resize.py | 6 +-- watcher/applier/workflow_engine/base.py | 5 +- watcher/applier/workflow_engine/default.py | 5 +- watcher/cmd/api.py | 7 ++- watcher/cmd/applier.py | 3 +- watcher/cmd/decisionengine.py | 3 +- watcher/cmd/sync.py | 5 +- watcher/common/context.py | 3 +- watcher/common/exception.py | 6 +-- watcher/common/rpc.py | 3 +- watcher/common/utils.py | 5 +- watcher/db/purge.py | 24 ++++----- .../model/notification/nova.py | 39 +++++++------- watcher/decision_engine/planner/weight.py | 3 +- .../planner/workload_stabilization.py | 3 +- watcher/decision_engine/scope/default.py | 7 ++- .../strategies/basic_consolidation.py | 28 +++++----- .../strategies/outlet_temp_control.py | 12 ++--- .../strategy/strategies/uniform_airflow.py | 22 ++++---- .../strategies/vm_workload_consolidation.py | 24 ++++----- .../strategy/strategies/workload_balance.py | 20 +++---- .../strategies/workload_stabilization.py | 11 ++-- watcher/decision_engine/sync.py | 53 +++++++++---------- 27 files changed, 154 insertions(+), 170 deletions(-) diff --git a/tox.ini b/tox.ini index 67d3d9147..79da018b9 100644 --- a/tox.ini +++ b/tox.ini @@ -45,7 +45,7 @@ commands = [flake8] show-source=True -ignore= H105,E123,E226 +ignore= H105,E123,E226,N320 builtins= _ 
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/*,demo/,releasenotes diff --git a/watcher/api/controllers/v1/service.py b/watcher/api/controllers/v1/service.py index 9933704ee..674be05ea 100644 --- a/watcher/api/controllers/v1/service.py +++ b/watcher/api/controllers/v1/service.py @@ -30,7 +30,6 @@ import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan -from watcher._i18n import _LW from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import collection @@ -72,9 +71,9 @@ class Service(base.APIBase): elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow()) is_up = abs(elapsed) <= CONF.service_down_time if not is_up: - LOG.warning(_LW('Seems service %(name)s on host %(host)s is down. ' - 'Last heartbeat was %(lhb)s.' - 'Elapsed time is %(el)s'), + LOG.warning('Seems service %(name)s on host %(host)s is down. ' + 'Last heartbeat was %(lhb)s.' + 'Elapsed time is %(el)s', {'name': service.name, 'host': service.host, 'lhb': str(last_heartbeat), 'el': str(elapsed)}) diff --git a/watcher/api/middleware/parsable_error.py b/watcher/api/middleware/parsable_error.py index 4acfe10ee..9d905ab4a 100644 --- a/watcher/api/middleware/parsable_error.py +++ b/watcher/api/middleware/parsable_error.py @@ -27,7 +27,7 @@ from oslo_serialization import jsonutils import six import webob -from watcher._i18n import _, _LE +from watcher._i18n import _ LOG = log.getLogger(__name__) @@ -79,7 +79,7 @@ class ParsableErrorMiddleware(object): et.ElementTree.Element( 'error_message', text='\n'.join(app_iter)))] except et.ElementTree.ParseError as err: - LOG.error(_LE('Error parsing HTTP response: %s'), err) + LOG.error('Error parsing HTTP response: %s', err) body = ['%s' '' % state['status_code']] state['headers'].append(('Content-Type', 'application/xml')) diff --git a/watcher/applier/actions/migration.py b/watcher/applier/actions/migration.py index 
e3525d97b..eb742fae6 100644 --- a/watcher/applier/actions/migration.py +++ b/watcher/applier/actions/migration.py @@ -21,7 +21,7 @@ from oslo_log import log import six import voluptuous -from watcher._i18n import _, _LC +from watcher._i18n import _ from watcher.applier.actions import base from watcher.common import exception from watcher.common import nova_helper @@ -120,9 +120,9 @@ class Migrate(base.BaseAction): "migrating instance %s.Exception: %s" % (self.instance_uuid, e)) except Exception: - LOG.critical(_LC("Unexpected error occurred. Migration failed for " - "instance %s. Leaving instance on previous " - "host."), self.instance_uuid) + LOG.critical("Unexpected error occurred. Migration failed for " + "instance %s. Leaving instance on previous " + "host.", self.instance_uuid) return result @@ -134,9 +134,9 @@ class Migrate(base.BaseAction): dest_hostname=destination) except Exception as exc: LOG.exception(exc) - LOG.critical(_LC("Unexpected error occurred. Migration failed for " - "instance %s. Leaving instance on previous " - "host."), self.instance_uuid) + LOG.critical("Unexpected error occurred. Migration failed for " + "instance %s. Leaving instance on previous " + "host.", self.instance_uuid) return result diff --git a/watcher/applier/actions/resize.py b/watcher/applier/actions/resize.py index 0db45f9ce..ea9df82fb 100644 --- a/watcher/applier/actions/resize.py +++ b/watcher/applier/actions/resize.py @@ -21,7 +21,7 @@ from oslo_log import log import six import voluptuous -from watcher._i18n import _, _LC +from watcher._i18n import _ from watcher.applier.actions import base from watcher.common import nova_helper from watcher.common import utils @@ -86,8 +86,8 @@ class Resize(base.BaseAction): except Exception as exc: LOG.exception(exc) LOG.critical( - _LC("Unexpected error occurred. Resizing failed for " - "instance %s."), self.instance_uuid) + "Unexpected error occurred. 
Resizing failed for " + "instance %s.", self.instance_uuid) return result def execute(self): diff --git a/watcher/applier/workflow_engine/base.py b/watcher/applier/workflow_engine/base.py index efe306282..7fff92c80 100644 --- a/watcher/applier/workflow_engine/base.py +++ b/watcher/applier/workflow_engine/base.py @@ -22,7 +22,6 @@ from oslo_log import log import six from taskflow import task as flow_task -from watcher._i18n import _LE from watcher.applier.actions import factory from watcher.common import clients from watcher.common.loader import loadable @@ -151,8 +150,8 @@ class BaseTaskFlowActionContainer(flow_task.Task): fields.NotificationPhase.END) except Exception as e: LOG.exception(e) - LOG.error(_LE('The workflow engine has failed ' - 'to execute the action: %s'), self.name) + LOG.error('The workflow engine has failed ' + 'to execute the action: %s', self.name) self.engine.notify(self._db_action, objects.action.State.FAILED) notifications.action.send_execution_notification( self.engine.context, self._db_action, diff --git a/watcher/applier/workflow_engine/default.py b/watcher/applier/workflow_engine/default.py index 47f70e658..804a42ce1 100644 --- a/watcher/applier/workflow_engine/default.py +++ b/watcher/applier/workflow_engine/default.py @@ -22,7 +22,6 @@ from taskflow import engines from taskflow.patterns import graph_flow as gf from taskflow import task as flow_task -from watcher._i18n import _LW, _LC from watcher.applier.workflow_engine import base from watcher.common import exception from watcher import objects @@ -117,13 +116,13 @@ class TaskFlowActionContainer(base.BaseTaskFlowActionContainer): self.action.post_condition() def revert(self, *args, **kwargs): - LOG.warning(_LW("Revert action: %s"), self.name) + LOG.warning("Revert action: %s", self.name) try: # TODO(jed): do we need to update the states in case of failure? self.action.revert() except Exception as e: LOG.exception(e) - LOG.critical(_LC("Oops! 
We need a disaster recover plan.")) + LOG.critical("Oops! We need a disaster recover plan.") class TaskFlowNop(flow_task.Task): diff --git a/watcher/cmd/api.py b/watcher/cmd/api.py index 5428e3c42..c354ffc70 100644 --- a/watcher/cmd/api.py +++ b/watcher/cmd/api.py @@ -22,7 +22,6 @@ import sys from oslo_config import cfg from oslo_log import log as logging -from watcher._i18n import _LI from watcher.common import service from watcher import conf @@ -39,11 +38,11 @@ def main(): server = service.WSGIService('watcher-api', CONF.api.enable_ssl_api) if host == '127.0.0.1': - LOG.info(_LI('serving on 127.0.0.1:%(port)s, ' - 'view at %(protocol)s://127.0.0.1:%(port)s') % + LOG.info('serving on 127.0.0.1:%(port)s, ' + 'view at %(protocol)s://127.0.0.1:%(port)s' % dict(protocol=protocol, port=port)) else: - LOG.info(_LI('serving on %(protocol)s://%(host)s:%(port)s') % + LOG.info('serving on %(protocol)s://%(host)s:%(port)s' % dict(protocol=protocol, host=host, port=port)) launcher = service.launch(CONF, server, workers=server.workers) diff --git a/watcher/cmd/applier.py b/watcher/cmd/applier.py index 2e58b518e..364a9ba10 100644 --- a/watcher/cmd/applier.py +++ b/watcher/cmd/applier.py @@ -22,7 +22,6 @@ import sys from oslo_log import log as logging -from watcher._i18n import _LI from watcher.applier import manager from watcher.common import service as watcher_service from watcher import conf @@ -34,7 +33,7 @@ CONF = conf.CONF def main(): watcher_service.prepare_service(sys.argv, CONF) - LOG.info(_LI('Starting Watcher Applier service in PID %s'), os.getpid()) + LOG.info('Starting Watcher Applier service in PID %s', os.getpid()) applier_service = watcher_service.Service(manager.ApplierManager) diff --git a/watcher/cmd/decisionengine.py b/watcher/cmd/decisionengine.py index 94219fbad..9b7e7f9a9 100644 --- a/watcher/cmd/decisionengine.py +++ b/watcher/cmd/decisionengine.py @@ -22,7 +22,6 @@ import sys from oslo_log import log as logging -from watcher._i18n import _LI from 
watcher.common import service as watcher_service from watcher import conf from watcher.decision_engine import gmr @@ -38,7 +37,7 @@ def main(): watcher_service.prepare_service(sys.argv, CONF) gmr.register_gmr_plugins() - LOG.info(_LI('Starting Watcher Decision Engine service in PID %s'), + LOG.info('Starting Watcher Decision Engine service in PID %s', os.getpid()) syncer = sync.Syncer() diff --git a/watcher/cmd/sync.py b/watcher/cmd/sync.py index 488e56e89..c0cbf3888 100644 --- a/watcher/cmd/sync.py +++ b/watcher/cmd/sync.py @@ -22,7 +22,6 @@ import sys from oslo_log import log as logging -from watcher._i18n import _LI from watcher.common import service as service from watcher import conf from watcher.decision_engine import sync @@ -32,10 +31,10 @@ CONF = conf.CONF def main(): - LOG.info(_LI('Watcher sync started.')) + LOG.info('Watcher sync started.') service.prepare_service(sys.argv, CONF) syncer = sync.Syncer() syncer.sync() - LOG.info(_LI('Watcher sync finished.')) + LOG.info('Watcher sync finished.') diff --git a/watcher/common/context.py b/watcher/common/context.py index 1d980c6af..4984898aa 100644 --- a/watcher/common/context.py +++ b/watcher/common/context.py @@ -15,7 +15,6 @@ from oslo_log import log as logging from oslo_utils import timeutils import six -from watcher._i18n import _LW from watcher.common import utils LOG = logging.getLogger(__name__) @@ -65,7 +64,7 @@ class RequestContext(context.RequestContext): # safely ignore this as we don't use it. 
kwargs.pop('user_identity', None) if kwargs: - LOG.warning(_LW('Arguments dropped when creating context: %s'), + LOG.warning('Arguments dropped when creating context: %s', str(kwargs)) # FIXME(dims): user_id and project_id duplicate information that is diff --git a/watcher/common/exception.py b/watcher/common/exception.py index ec1924cea..1886c6daa 100644 --- a/watcher/common/exception.py +++ b/watcher/common/exception.py @@ -29,7 +29,7 @@ from keystoneclient import exceptions as keystone_exceptions from oslo_log import log as logging import six -from watcher._i18n import _, _LE +from watcher._i18n import _ from watcher import conf @@ -83,9 +83,9 @@ class WatcherException(Exception): except Exception: # kwargs doesn't match a variable in msg_fmt # log the issue and the kwargs - LOG.exception(_LE('Exception in string format operation')) + LOG.exception('Exception in string format operation') for name, value in kwargs.items(): - LOG.error(_LE("%(name)s: %(value)s"), + LOG.error("%(name)s: %(value)s", {'name': name, 'value': value}) if CONF.fatal_exception_format_errors: diff --git a/watcher/common/rpc.py b/watcher/common/rpc.py index 0905546b8..09b7dd6e9 100644 --- a/watcher/common/rpc.py +++ b/watcher/common/rpc.py @@ -17,7 +17,6 @@ from oslo_config import cfg from oslo_log import log import oslo_messaging as messaging -from watcher._i18n import _LE from watcher.common import context as watcher_context from watcher.common import exception @@ -74,7 +73,7 @@ def initialized(): def cleanup(): global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER if NOTIFIER is None: - LOG.exception(_LE("RPC cleanup: NOTIFIER is None")) + LOG.exception("RPC cleanup: NOTIFIER is None") TRANSPORT.cleanup() NOTIFICATION_TRANSPORT.cleanup() TRANSPORT = NOTIFICATION_TRANSPORT = NOTIFIER = None diff --git a/watcher/common/utils.py b/watcher/common/utils.py index 44cb050d5..ff9cdc110 100644 --- a/watcher/common/utils.py +++ b/watcher/common/utils.py @@ -25,7 +25,6 @@ from oslo_utils import 
timeutils from oslo_utils import uuidutils import six -from watcher._i18n import _LW from watcher.common import exception from watcher import conf @@ -73,9 +72,9 @@ def safe_rstrip(value, chars=None): """ if not isinstance(value, six.string_types): - LOG.warning(_LW( + LOG.warning( "Failed to remove trailing character. Returning original object." - "Supplied object is not a string: %s,"), value) + "Supplied object is not a string: %s,", value) return value return value.rstrip(chars) or value diff --git a/watcher/db/purge.py b/watcher/db/purge.py index e3e09a0a0..4fb6e5efe 100644 --- a/watcher/db/purge.py +++ b/watcher/db/purge.py @@ -27,7 +27,7 @@ from oslo_utils import strutils import prettytable as ptable from six.moves import input -from watcher._i18n import _, _LI +from watcher._i18n import _ from watcher._i18n import lazy_translation_enabled from watcher.common import context from watcher.common import exception @@ -231,7 +231,7 @@ class PurgeCommand(object): if action.action_plan_id not in action_plan_ids] LOG.debug("Orphans found:\n%s", orphans) - LOG.info(_LI("Orphans found:\n%s"), orphans.get_count_table()) + LOG.info("Orphans found:\n%s", orphans.get_count_table()) return orphans @@ -403,13 +403,13 @@ class PurgeCommand(object): return to_be_deleted def do_delete(self): - LOG.info(_LI("Deleting...")) + LOG.info("Deleting...") # Reversed to avoid errors with foreign keys for entry in reversed(list(self._objects_map)): entry.destroy() def execute(self): - LOG.info(_LI("Starting purge command")) + LOG.info("Starting purge command") self._objects_map = self.find_objects_to_delete() if (self.max_number is not None and @@ -424,15 +424,15 @@ class PurgeCommand(object): if not self.dry_run and self.confirmation_prompt(): self.do_delete() print(_("Purge results summary%s:") % _orphans_note) - LOG.info(_LI("Purge results summary%s:"), _orphans_note) + LOG.info("Purge results summary%s:", _orphans_note) else: LOG.debug(self._objects_map) print(_("Here below is a 
table containing the objects " "that can be purged%s:") % _orphans_note) - LOG.info(_LI("\n%s"), self._objects_map.get_count_table()) + LOG.info("\n%s", self._objects_map.get_count_table()) print(self._objects_map.get_count_table()) - LOG.info(_LI("Purge process completed")) + LOG.info("Purge process completed") def purge(age_in_days, max_number, goal, exclude_orphans, dry_run): @@ -457,11 +457,11 @@ def purge(age_in_days, max_number, goal, exclude_orphans, dry_run): if max_number and max_number < 0: raise exception.NegativeLimitError - LOG.info(_LI("[options] age_in_days = %s"), age_in_days) - LOG.info(_LI("[options] max_number = %s"), max_number) - LOG.info(_LI("[options] goal = %s"), goal) - LOG.info(_LI("[options] exclude_orphans = %s"), exclude_orphans) - LOG.info(_LI("[options] dry_run = %s"), dry_run) + LOG.info("[options] age_in_days = %s", age_in_days) + LOG.info("[options] max_number = %s", max_number) + LOG.info("[options] goal = %s", goal) + LOG.info("[options] exclude_orphans = %s", exclude_orphans) + LOG.info("[options] dry_run = %s", dry_run) uuid = PurgeCommand.get_goal_uuid(goal) diff --git a/watcher/decision_engine/model/notification/nova.py b/watcher/decision_engine/model/notification/nova.py index 3b578bbc2..42df5cdd2 100644 --- a/watcher/decision_engine/model/notification/nova.py +++ b/watcher/decision_engine/model/notification/nova.py @@ -17,7 +17,6 @@ # limitations under the License. 
from oslo_log import log -from watcher._i18n import _LI, _LW from watcher.common import exception from watcher.common import nova_helper from watcher.decision_engine.model import element @@ -44,8 +43,8 @@ class NovaNotification(base.NotificationEndpoint): if node_uuid: self.get_or_create_node(node_uuid) except exception.ComputeNodeNotFound: - LOG.warning(_LW("Could not find compute node %(node)s for " - "instance %(instance)s"), + LOG.warning("Could not find compute node %(node)s for " + "instance %(instance)s", dict(node=node_uuid, instance=instance_uuid)) try: instance = self.cluster_data_model.get_instance_by_uuid( @@ -201,7 +200,7 @@ class NovaNotification(base.NotificationEndpoint): try: self.cluster_data_model.delete_instance(instance, node) except Exception: - LOG.info(_LI("Instance %s already deleted"), instance.uuid) + LOG.info("Instance %s already deleted", instance.uuid) class VersionedNotificationEndpoint(NovaNotification): @@ -225,8 +224,8 @@ class ServiceUpdated(VersionedNotificationEndpoint): def info(self, ctxt, publisher_id, event_type, payload, metadata): ctxt.request_id = metadata['message_id'] ctxt.project_domain = event_type - LOG.info(_LI("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s") % + LOG.info("Event '%(event)s' received from %(publisher)s " + "with metadata %(metadata)s" % dict(event=event_type, publisher=publisher_id, metadata=metadata)) @@ -269,8 +268,8 @@ class InstanceCreated(VersionedNotificationEndpoint): def info(self, ctxt, publisher_id, event_type, payload, metadata): ctxt.request_id = metadata['message_id'] ctxt.project_domain = event_type - LOG.info(_LI("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s") % + LOG.info("Event '%(event)s' received from %(publisher)s " + "with metadata %(metadata)s" % dict(event=event_type, publisher=publisher_id, metadata=metadata)) @@ -304,8 +303,8 @@ class InstanceUpdated(VersionedNotificationEndpoint): def info(self, ctxt, 
publisher_id, event_type, payload, metadata): ctxt.request_id = metadata['message_id'] ctxt.project_domain = event_type - LOG.info(_LI("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s") % + LOG.info("Event '%(event)s' received from %(publisher)s " + "with metadata %(metadata)s" % dict(event=event_type, publisher=publisher_id, metadata=metadata)) @@ -331,8 +330,8 @@ class InstanceDeletedEnd(VersionedNotificationEndpoint): def info(self, ctxt, publisher_id, event_type, payload, metadata): ctxt.request_id = metadata['message_id'] ctxt.project_domain = event_type - LOG.info(_LI("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s") % + LOG.info("Event '%(event)s' received from %(publisher)s " + "with metadata %(metadata)s" % dict(event=event_type, publisher=publisher_id, metadata=metadata)) @@ -366,8 +365,8 @@ class LegacyInstanceUpdated(UnversionedNotificationEndpoint): def info(self, ctxt, publisher_id, event_type, payload, metadata): ctxt.request_id = metadata['message_id'] ctxt.project_domain = event_type - LOG.info(_LI("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s") % + LOG.info("Event '%(event)s' received from %(publisher)s " + "with metadata %(metadata)s" % dict(event=event_type, publisher=publisher_id, metadata=metadata)) @@ -393,8 +392,8 @@ class LegacyInstanceCreatedEnd(UnversionedNotificationEndpoint): def info(self, ctxt, publisher_id, event_type, payload, metadata): ctxt.request_id = metadata['message_id'] ctxt.project_domain = event_type - LOG.info(_LI("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s") % + LOG.info("Event '%(event)s' received from %(publisher)s " + "with metadata %(metadata)s" % dict(event=event_type, publisher=publisher_id, metadata=metadata)) @@ -420,8 +419,8 @@ class LegacyInstanceDeletedEnd(UnversionedNotificationEndpoint): def info(self, ctxt, publisher_id, event_type, payload, metadata): ctxt.request_id = 
metadata['message_id'] ctxt.project_domain = event_type - LOG.info(_LI("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s") % + LOG.info("Event '%(event)s' received from %(publisher)s " + "with metadata %(metadata)s" % dict(event=event_type, publisher=publisher_id, metadata=metadata)) @@ -453,8 +452,8 @@ class LegacyLiveMigratedEnd(UnversionedNotificationEndpoint): def info(self, ctxt, publisher_id, event_type, payload, metadata): ctxt.request_id = metadata['message_id'] ctxt.project_domain = event_type - LOG.info(_LI("Event '%(event)s' received from %(publisher)s " - "with metadata %(metadata)s") % + LOG.info("Event '%(event)s' received from %(publisher)s " + "with metadata %(metadata)s" % dict(event=event_type, publisher=publisher_id, metadata=metadata)) diff --git a/watcher/decision_engine/planner/weight.py b/watcher/decision_engine/planner/weight.py index a504a738b..8605917b0 100644 --- a/watcher/decision_engine/planner/weight.py +++ b/watcher/decision_engine/planner/weight.py @@ -22,7 +22,6 @@ from oslo_config import cfg from oslo_config import types from oslo_log import log -from watcher._i18n import _LW from watcher.common import utils from watcher.decision_engine.planner import base from watcher import objects @@ -152,7 +151,7 @@ class WeightPlanner(base.BasePlanner): context, action_plan.id, solution.efficacy_indicators) if len(action_graph.nodes()) == 0: - LOG.warning(_LW("The action plan is empty")) + LOG.warning("The action plan is empty") action_plan.state = objects.action_plan.State.SUCCEEDED action_plan.save() diff --git a/watcher/decision_engine/planner/workload_stabilization.py b/watcher/decision_engine/planner/workload_stabilization.py index 125e31021..f7cd96afa 100644 --- a/watcher/decision_engine/planner/workload_stabilization.py +++ b/watcher/decision_engine/planner/workload_stabilization.py @@ -20,7 +20,6 @@ from oslo_config import cfg from oslo_config import types from oslo_log import log -from watcher._i18n import 
_LW from watcher.common import clients from watcher.common import exception from watcher.common import nova_helper @@ -117,7 +116,7 @@ class WorkloadStabilizationPlanner(base.BasePlanner): scheduled = sorted(to_schedule, key=lambda weight: (weight[0]), reverse=True) if len(scheduled) == 0: - LOG.warning(_LW("The action plan is empty")) + LOG.warning("The action plan is empty") action_plan.state = objects.action_plan.State.SUCCEEDED action_plan.save() else: diff --git a/watcher/decision_engine/scope/default.py b/watcher/decision_engine/scope/default.py index f6de55b67..dfc0a2de8 100644 --- a/watcher/decision_engine/scope/default.py +++ b/watcher/decision_engine/scope/default.py @@ -16,7 +16,6 @@ from oslo_log import log -from watcher._i18n import _LW from watcher.common import exception from watcher.common import nova_helper from watcher.decision_engine.scope import base @@ -170,9 +169,9 @@ class DefaultScope(base.BaseScope): node_name = cluster_model.get_node_by_instance_uuid( instance_uuid).uuid except exception.ComputeResourceNotFound: - LOG.warning(_LW("The following instance %s cannot be found. " - "It might be deleted from CDM along with node" - " instance was hosted on."), + LOG.warning("The following instance %s cannot be found. " + "It might be deleted from CDM along with node" + " instance was hosted on.", instance_uuid) continue self.remove_instance( diff --git a/watcher/decision_engine/strategy/strategies/basic_consolidation.py b/watcher/decision_engine/strategy/strategies/basic_consolidation.py index e96f499cc..1341f1ab2 100644 --- a/watcher/decision_engine/strategy/strategies/basic_consolidation.py +++ b/watcher/decision_engine/strategy/strategies/basic_consolidation.py @@ -38,7 +38,7 @@ migration is possible on your OpenStack cluster. 
from oslo_config import cfg from oslo_log import log -from watcher._i18n import _, _LE, _LI, _LW +from watcher._i18n import _ from watcher.common import exception from watcher.datasource import ceilometer as ceil from watcher.datasource import monasca as mon @@ -319,11 +319,11 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy): if host_avg_cpu_util is None: resource_id = "%s_%s" % (node.uuid, node.hostname) LOG.error( - _LE("No values returned by %(resource_id)s " - "for %(metric_name)s") % dict( - resource_id=resource_id, - metric_name=self.METRIC_NAMES[ - self.config.datasource]['host_cpu_usage'])) + "No values returned by %(resource_id)s " + "for %(metric_name)s" % dict( + resource_id=resource_id, + metric_name=self.METRIC_NAMES[ + self.config.datasource]['host_cpu_usage'])) host_avg_cpu_util = 100 total_cores_used = node.vcpus * (host_avg_cpu_util / 100.0) @@ -339,11 +339,11 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy): instance_cpu_utilization = self.get_instance_cpu_usage(instance) if instance_cpu_utilization is None: LOG.error( - _LE("No values returned by %(resource_id)s " - "for %(metric_name)s") % dict( - resource_id=instance.uuid, - metric_name=self.METRIC_NAMES[ - self.config.datasource]['instance_cpu_usage'])) + "No values returned by %(resource_id)s " + "for %(metric_name)s" % dict( + resource_id=instance.uuid, + metric_name=self.METRIC_NAMES[ + self.config.datasource]['instance_cpu_usage'])) instance_cpu_utilization = 100 total_cores_used = instance.vcpus * (instance_cpu_utilization / 100.0) @@ -439,7 +439,7 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy): return unsuccessful_migration + 1 def pre_execute(self): - LOG.info(_LI("Initializing Server Consolidation")) + LOG.info("Initializing Server Consolidation") if not self.compute_model: raise exception.ClusterStateNotDefined() @@ -461,9 +461,9 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy): LOG.debug("Compute node(s) BFD %s", 
sorted_scores) # Get Node to be released if len(scores) == 0: - LOG.warning(_LW( + LOG.warning( "The workloads of the compute nodes" - " of the cluster is zero")) + " of the cluster is zero") return while sorted_scores and ( diff --git a/watcher/decision_engine/strategy/strategies/outlet_temp_control.py b/watcher/decision_engine/strategy/strategies/outlet_temp_control.py index c260e462c..9e4205c63 100644 --- a/watcher/decision_engine/strategy/strategies/outlet_temp_control.py +++ b/watcher/decision_engine/strategy/strategies/outlet_temp_control.py @@ -30,7 +30,7 @@ telemetries to measure thermal/workload status of server. from oslo_log import log -from watcher._i18n import _, _LW, _LI +from watcher._i18n import _ from watcher.common import exception as wexc from watcher.datasource import ceilometer as ceil from watcher.decision_engine.model import element @@ -153,7 +153,7 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy): aggregate='avg') # some hosts may not have outlet temp meters, remove from target if outlet_temp is None: - LOG.warning(_LW("%s: no outlet temp data"), resource_id) + LOG.warning("%s: no outlet temp data", resource_id) continue LOG.debug("%s: outlet temperature %f" % (resource_id, outlet_temp)) @@ -176,13 +176,13 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy): # select the first active instance to migrate if (instance.state != element.InstanceState.ACTIVE.value): - LOG.info(_LI("Instance not active, skipped: %s"), + LOG.info("Instance not active, skipped: %s", instance.uuid) continue return mig_source_node, instance except wexc.InstanceNotFound as e: LOG.exception(e) - LOG.info(_LI("Instance not found")) + LOG.info("Instance not found") return None @@ -233,7 +233,7 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy): return self.solution if len(hosts_target) == 0: - LOG.warning(_LW("No hosts under outlet temp threshold found")) + LOG.warning("No hosts under outlet temp threshold found") return 
self.solution # choose the server with highest outlet t @@ -254,7 +254,7 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy): if len(dest_servers) == 0: # TODO(zhenzanz): maybe to warn that there's no resource # for instance. - LOG.info(_LI("No proper target host could be found")) + LOG.info("No proper target host could be found") return self.solution dest_servers = sorted(dest_servers, key=lambda x: (x["outlet_temp"])) diff --git a/watcher/decision_engine/strategy/strategies/uniform_airflow.py b/watcher/decision_engine/strategy/strategies/uniform_airflow.py index 8eafbdc2c..a779ed259 100644 --- a/watcher/decision_engine/strategy/strategies/uniform_airflow.py +++ b/watcher/decision_engine/strategy/strategies/uniform_airflow.py @@ -45,7 +45,7 @@ airflow is higher than the specified threshold. from oslo_log import log -from watcher._i18n import _, _LI, _LW +from watcher._i18n import _ from watcher.common import exception as wexc from watcher.datasource import ceilometer as ceil from watcher.decision_engine.model import element @@ -210,13 +210,13 @@ class UniformAirflow(base.BaseStrategy): if (instance.state != element.InstanceState.ACTIVE.value): LOG.info( - _LI("Instance not active, skipped: %s"), + "Instance not active, skipped: %s", instance.uuid) continue instances_tobe_migrate.append(instance) return source_node, instances_tobe_migrate else: - LOG.info(_LI("Instance not found on node: %s"), + LOG.info("Instance not found on node: %s", source_node.uuid) def filter_destination_hosts(self, hosts, instances_to_migrate): @@ -257,8 +257,8 @@ class UniformAirflow(base.BaseStrategy): break # check if all instances have target hosts if len(destination_hosts) != len(instances_to_migrate): - LOG.warning(_LW("Not all target hosts could be found; it might " - "be because there is not enough resource")) + LOG.warning("Not all target hosts could be found; it might " + "be because there is not enough resource") return None return destination_hosts @@ -281,7 +281,7 
@@ class UniformAirflow(base.BaseStrategy): aggregate='avg') # some hosts may not have airflow meter, remove from target if airflow is None: - LOG.warning(_LW("%s: no airflow data"), resource_id) + LOG.warning("%s: no airflow data", resource_id) continue LOG.debug("%s: airflow %f" % (resource_id, airflow)) @@ -316,9 +316,9 @@ class UniformAirflow(base.BaseStrategy): return self.solution if not target_nodes: - LOG.warning(_LW("No hosts currently have airflow under %s, " - "therefore there are no possible target " - "hosts for any migration"), + LOG.warning("No hosts currently have airflow under %s, " + "therefore there are no possible target " + "hosts for any migration", self.threshold_airflow) return self.solution @@ -337,8 +337,8 @@ class UniformAirflow(base.BaseStrategy): destination_hosts = self.filter_destination_hosts( target_nodes, instances_src) if not destination_hosts: - LOG.warning(_LW("No target host could be found; it might " - "be because there is not enough resources")) + LOG.warning("No target host could be found; it might " + "be because there is not enough resources") return self.solution # generate solution to migrate the instance to the dest server, for info in destination_hosts: diff --git a/watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py b/watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py index 0270c87ff..21f9c1146 100644 --- a/watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py +++ b/watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py @@ -56,7 +56,7 @@ an active compute node to any other active compute node. 
from oslo_log import log import six -from watcher._i18n import _, _LE, _LI +from watcher._i18n import _ from watcher.common import exception from watcher.datasource import ceilometer as ceil from watcher.decision_engine.model import element @@ -107,10 +107,10 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy): elif isinstance(state, (element.InstanceState, element.ServiceState)): return state.value else: - LOG.error(_LE('Unexpexted resource state type, ' - 'state=%(state)s, state_type=%(st)s.') % dict( - state=state, - st=type(state))) + LOG.error('Unexpected resource state type, ' + 'state=%(state)s, state_type=%(st)s.' % + dict(state=state, + st=type(state))) raise exception.WatcherException def add_action_enable_compute_node(self, node): @@ -154,10 +154,10 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy): # When supported, the cold migration may be used as a fallback # migration mechanism to move non active VMs. LOG.error( - _LE('Cannot live migrate: instance_uuid=%(instance_uuid)s, ' - 'state=%(instance_state)s.') % dict( - instance_uuid=instance.uuid, - instance_state=instance_state_str)) + 'Cannot live migrate: instance_uuid=%(instance_uuid)s, ' + 'state=%(instance_state)s.' 
% dict( + instance_uuid=instance.uuid, + instance_state=instance_state_str)) return migration_type = 'live' @@ -229,8 +229,8 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy): if not instance_ram_util or not instance_disk_util: LOG.error( - _LE('No values returned by %s for memory.usage ' - 'or disk.root.size'), instance.uuid) + 'No values returned by %s for memory.usage ' + 'or disk.root.size', instance.uuid) raise exception.NoDataFound self.ceilometer_instance_data_cache[instance.uuid] = dict( @@ -475,7 +475,7 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy): :param original_model: root_model object """ - LOG.info(_LI('Executing Smart Strategy')) + LOG.info('Executing Smart Strategy') rcu = self.get_relative_cluster_utilization() cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} diff --git a/watcher/decision_engine/strategy/strategies/workload_balance.py b/watcher/decision_engine/strategy/strategies/workload_balance.py index efdd07849..0f3067299 100644 --- a/watcher/decision_engine/strategy/strategies/workload_balance.py +++ b/watcher/decision_engine/strategy/strategies/workload_balance.py @@ -49,7 +49,7 @@ hosts nodes. 
from oslo_log import log -from watcher._i18n import _, _LE, _LI, _LW +from watcher._i18n import _ from watcher.common import exception as wexc from watcher.datasource import ceilometer as ceil from watcher.decision_engine.model import element @@ -187,14 +187,14 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy): min_delta = current_delta instance_id = instance.uuid except wexc.InstanceNotFound: - LOG.error(_LE("Instance not found; error: %s"), + LOG.error("Instance not found; error: %s", instance_id) if instance_id: return (source_node, self.compute_model.get_instance_by_uuid( instance_id)) else: - LOG.info(_LI("VM not found from node: %s"), + LOG.info("VM not found from node: %s", source_node.uuid) def filter_destination_hosts(self, hosts, instance_to_migrate, @@ -259,7 +259,7 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy): aggregate='avg') except Exception as exc: LOG.exception(exc) - LOG.error(_LE("Can not get cpu_util from Ceilometer")) + LOG.error("Can not get cpu_util from Ceilometer") continue if cpu_util is None: LOG.debug("Instance (%s): cpu_util is None", instance.uuid) @@ -289,7 +289,7 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy): This can be used to fetch some pre-requisites or data. 
""" - LOG.info(_LI("Initializing Workload Balance Strategy")) + LOG.info("Initializing Workload Balance Strategy") if not self.compute_model: raise wexc.ClusterStateNotDefined() @@ -314,9 +314,9 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy): return self.solution if not target_nodes: - LOG.warning(_LW("No hosts current have CPU utilization under %s " - "percent, therefore there are no possible target " - "hosts for any migration"), + LOG.warning("No hosts current have CPU utilization under %s " + "percent, therefore there are no possible target " + "hosts for any migration", self.threshold) return self.solution @@ -337,8 +337,8 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy): # pick up the lowest one as dest server if not destination_hosts: # for instance. - LOG.warning(_LW("No proper target host could be found, it might " - "be because of there's no enough CPU/Memory/DISK")) + LOG.warning("No proper target host could be found, it might " + "be because of there's no enough CPU/Memory/DISK") return self.solution destination_hosts = sorted(destination_hosts, key=lambda x: (x["cpu_util"])) diff --git a/watcher/decision_engine/strategy/strategies/workload_stabilization.py b/watcher/decision_engine/strategy/strategies/workload_stabilization.py index 11ecd5220..195c86f7d 100644 --- a/watcher/decision_engine/strategy/strategies/workload_stabilization.py +++ b/watcher/decision_engine/strategy/strategies/workload_stabilization.py @@ -38,7 +38,7 @@ from oslo_config import cfg from oslo_log import log import oslo_utils -from watcher._i18n import _LI, _LW, _ +from watcher._i18n import _ from watcher.common import exception from watcher.datasource import ceilometer as ceil from watcher.decision_engine.model import element @@ -202,10 +202,9 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy): ) if avg_meter is None: LOG.warning( - _LW("No values returned by %(resource_id)s " - "for %(metric_name)s") % dict( - 
resource_id=instance.uuid, - metric_name=meter)) + "No values returned by %(resource_id)s " + "for %(metric_name)s" % dict( + resource_id=instance.uuid, metric_name=meter)) avg_meter = 0 if meter == 'cpu_util': avg_meter /= float(100) @@ -399,7 +398,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy): return self.solution def pre_execute(self): - LOG.info(_LI("Initializing Workload Stabilization")) + LOG.info("Initializing Workload Stabilization") if not self.compute_model: raise exception.ClusterStateNotDefined() diff --git a/watcher/decision_engine/sync.py b/watcher/decision_engine/sync.py index 079431597..17c331834 100644 --- a/watcher/decision_engine/sync.py +++ b/watcher/decision_engine/sync.py @@ -19,7 +19,6 @@ import collections from oslo_log import log -from watcher._i18n import _LI, _LW from watcher.common import context from watcher.decision_engine.loading import default from watcher.decision_engine.scoring import scoring_factory @@ -136,7 +135,7 @@ class Syncer(object): for goal_name, goal_map in goals_map.items(): if goal_map in self.available_goals_map: - LOG.info(_LI("Goal %s already exists"), goal_name) + LOG.info("Goal %s already exists", goal_name) continue self.goal_mapping.update(self._sync_goal(goal_map)) @@ -145,14 +144,14 @@ class Syncer(object): if (strategy_map in self.available_strategies_map and strategy_map.goal_name not in [g.name for g in self.goal_mapping.values()]): - LOG.info(_LI("Strategy %s already exists"), strategy_name) + LOG.info("Strategy %s already exists", strategy_name) continue self.strategy_mapping.update(self._sync_strategy(strategy_map)) for se_name, se_map in scoringengines_map.items(): if se_map in self.available_scoringengines_map: - LOG.info(_LI("Scoring Engine %s already exists"), + LOG.info("Scoring Engine %s already exists", se_name) continue @@ -177,7 +176,7 @@ class Syncer(object): indicator._asdict() for indicator in goal_map.efficacy_specification] goal.create() - LOG.info(_LI("Goal %s 
created"), goal_name) + LOG.info("Goal %s created", goal_name) # Updating the internal states self.available_goals_map[goal] = goal_map @@ -208,7 +207,7 @@ class Syncer(object): strategy.goal_id = objects.Goal.get_by_name(self.ctx, goal_name).id strategy.parameters_spec = parameters_spec strategy.create() - LOG.info(_LI("Strategy %s created"), strategy_name) + LOG.info("Strategy %s created", strategy_name) # Updating the internal states self.available_strategies_map[strategy] = strategy_map @@ -233,7 +232,7 @@ class Syncer(object): scoringengine.description = scoringengine_map.description scoringengine.metainfo = scoringengine_map.metainfo scoringengine.create() - LOG.info(_LI("Scoring Engine %s created"), scoringengine_name) + LOG.info("Scoring Engine %s created", scoringengine_name) # Updating the internal states self.available_scoringengines_map[scoringengine] = \ @@ -270,17 +269,17 @@ class Syncer(object): # and soft delete stale audits and action plans for stale_audit_template in self.stale_audit_templates_map.values(): stale_audit_template.save() - LOG.info(_LI("Audit Template '%s' synced"), + LOG.info("Audit Template '%s' synced", stale_audit_template.name) for stale_audit in self.stale_audits_map.values(): stale_audit.save() - LOG.info(_LI("Stale audit '%s' synced and cancelled"), + LOG.info("Stale audit '%s' synced and cancelled", stale_audit.uuid) for stale_action_plan in self.stale_action_plans_map.values(): stale_action_plan.save() - LOG.info(_LI("Stale action plan '%s' synced and cancelled"), + LOG.info("Stale action plan '%s' synced and cancelled", stale_action_plan.uuid) def _find_stale_audit_templates_due_to_goal(self): @@ -395,15 +394,15 @@ class Syncer(object): invalid_ats = objects.AuditTemplate.list(self.ctx, filters=filters) for at in invalid_ats: LOG.warning( - _LW("Audit Template '%(audit_template)s' references a " - "goal that does not exist"), audit_template=at.uuid) + "Audit Template '%(audit_template)s' references a " + "goal that does 
not exist", audit_template=at.uuid) stale_audits = objects.Audit.list( self.ctx, filters=filters, eager=True) for audit in stale_audits: LOG.warning( - _LW("Audit '%(audit)s' references a " - "goal that does not exist"), audit=audit.uuid) + "Audit '%(audit)s' references a " + "goal that does not exist", audit=audit.uuid) if audit.id not in self.stale_audits_map: audit.state = objects.audit.State.CANCELLED self.stale_audits_map[audit.id] = audit @@ -422,8 +421,8 @@ class Syncer(object): invalid_ats = objects.AuditTemplate.list(self.ctx, filters=filters) for at in invalid_ats: LOG.info( - _LI("Audit Template '%(audit_template)s' references a " - "strategy that does not exist"), + "Audit Template '%(audit_template)s' references a " + "strategy that does not exist", audit_template=at.uuid) # In this case we can reset the strategy ID to None # so the audit template can still achieve the same goal @@ -438,8 +437,8 @@ class Syncer(object): self.ctx, filters=filters, eager=True) for audit in stale_audits: LOG.warning( - _LW("Audit '%(audit)s' references a " - "strategy that does not exist"), audit=audit.uuid) + "Audit '%(audit)s' references a " + "strategy that does not exist", audit=audit.uuid) if audit.id not in self.stale_audits_map: audit.state = objects.audit.State.CANCELLED self.stale_audits_map[audit.id] = audit @@ -451,8 +450,8 @@ class Syncer(object): self.ctx, filters=filters, eager=True) for action_plan in stale_action_plans: LOG.warning( - _LW("Action Plan '%(action_plan)s' references a " - "strategy that does not exist"), + "Action Plan '%(action_plan)s' references a " + "strategy that does not exist", action_plan=action_plan.uuid) if action_plan.id not in self.stale_action_plans_map: action_plan.state = objects.action_plan.State.CANCELLED @@ -467,7 +466,7 @@ class Syncer(object): se for se in self.available_scoringengines if se.name not in self.discovered_map['scoringengines']] for se in removed_se: - LOG.info(_LI("Scoring Engine %s removed"), se.name) + 
LOG.info("Scoring Engine %s removed", se.name) se.soft_delete() def _discover(self): @@ -526,9 +525,9 @@ class Syncer(object): for matching_goal in matching_goals: if (matching_goal.efficacy_specification == goal_efficacy_spec and matching_goal.display_name == goal_display_name): - LOG.info(_LI("Goal %s unchanged"), goal_name) + LOG.info("Goal %s unchanged", goal_name) else: - LOG.info(_LI("Goal %s modified"), goal_name) + LOG.info("Goal %s modified", goal_name) matching_goal.soft_delete() stale_goals.append(matching_goal) @@ -545,9 +544,9 @@ class Syncer(object): matching_strategy.goal_id not in self.goal_mapping and matching_strategy.parameters_spec == ast.literal_eval(parameters_spec)): - LOG.info(_LI("Strategy %s unchanged"), strategy_name) + LOG.info("Strategy %s unchanged", strategy_name) else: - LOG.info(_LI("Strategy %s modified"), strategy_name) + LOG.info("Strategy %s modified", strategy_name) matching_strategy.soft_delete() stale_strategies.append(matching_strategy) @@ -563,9 +562,9 @@ class Syncer(object): for matching_scoringengine in matching_scoringengines: if (matching_scoringengine.description == se_description and matching_scoringengine.metainfo == se_metainfo): - LOG.info(_LI("Scoring Engine %s unchanged"), se_name) + LOG.info("Scoring Engine %s unchanged", se_name) else: - LOG.info(_LI("Scoring Engine %s modified"), se_name) + LOG.info("Scoring Engine %s modified", se_name) matching_scoringengine.soft_delete() stale_scoringengines.append(matching_scoringengine)