Use LOG.warning instead of LOG.warn
Python 3 deprecated the logger.warn method (see
https://docs.python.org/3/library/logging.html#logging.warning), so we
prefer LOG.warning to avoid DeprecationWarning.

Closes-Bug: #1529913
Change-Id: Icc01ce5fbd10880440cf75a2e0833394783464a0
Co-Authored-By: Gary Kotton <gkotton@vmware.com>
parent 2768da320d
commit 83ef6b5677
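To illustrate the rationale in the commit message (not part of this change): on modern Python 3 (3.4 and later, at least), Logger.warn is only a deprecated alias for Logger.warning, so escalating warnings to errors makes the difference visible. The snippet below is a minimal standalone sketch; the logger name and messages are invented for demonstration.

# Minimal sketch of the Python 3 behaviour this commit avoids.
import logging
import warnings

LOG = logging.getLogger("demo")

with warnings.catch_warnings():
    warnings.simplefilter("error", DeprecationWarning)
    LOG.warning("preferred spelling, no DeprecationWarning raised")
    try:
        LOG.warn("deprecated alias")  # warns (and here raises) on Python 3
    except DeprecationWarning as exc:
        print("caught:", exc)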
@@ -23,6 +23,8 @@ Neutron Specific Commandments
 - [N331] Detect wrong usage with assertTrue(isinstance()).
 - [N332] Use assertEqual(expected_http_code, observed_http_code) instead of
   assertEqual(observed_http_code, expected_http_code).
+- [N333] Validate that LOG.warning is used instead of LOG.warn. The latter
+  is deprecated.

 Creating Unit Tests
 -------------------
@@ -449,11 +449,11 @@ class OVSBridge(BaseOVS):
 if_exists=True)
 for result in results:
 if result['ofport'] == UNASSIGNED_OFPORT:
-LOG.warn(_LW("Found not yet ready openvswitch port: %s"),
-result['name'])
+LOG.warning(_LW("Found not yet ready openvswitch port: %s"),
+result['name'])
 elif result['ofport'] == INVALID_OFPORT:
-LOG.warn(_LW("Found failed openvswitch port: %s"),
-result['name'])
+LOG.warning(_LW("Found failed openvswitch port: %s"),
+result['name'])
 elif 'attached-mac' in result['external_ids']:
 port_id = self.portid_from_external_ids(result['external_ids'])
 if port_id:
@@ -511,9 +511,9 @@ class OVSBridge(BaseOVS):
 @staticmethod
 def _check_ofport(port_id, port_info):
 if port_info['ofport'] in [UNASSIGNED_OFPORT, INVALID_OFPORT]:
-LOG.warn(_LW("ofport: %(ofport)s for VIF: %(vif)s is not a"
-" positive integer"),
-{'ofport': port_info['ofport'], 'vif': port_id})
+LOG.warning(_LW("ofport: %(ofport)s for VIF: %(vif)s "
+"is not a positive integer"),
+{'ofport': port_info['ofport'], 'vif': port_id})
 return False
 return True
@@ -203,7 +203,7 @@ class DhcpAgent(manager.Manager):
 try:
 network = self.plugin_rpc.get_network_info(network_id)
 if not network:
-LOG.warn(_LW('Network %s has been deleted.'), network_id)
+LOG.warning(_LW('Network %s has been deleted.'), network_id)
 return network
 except Exception as e:
 self.schedule_resync(e, network_id)
@@ -223,8 +223,9 @@ class DhcpAgent(manager.Manager):
 self.configure_dhcp_for_network(network)
 LOG.info(_LI('Finished network %s dhcp configuration'), network_id)
 except (exceptions.NetworkNotFound, RuntimeError):
-LOG.warn(_LW('Network %s may have been deleted and its resources '
-'may have already been disposed.'), network.id)
+LOG.warning(_LW('Network %s may have been deleted and '
+'its resources may have already been disposed.'),
+network.id)

 def configure_dhcp_for_network(self, network):
 if not network.admin_state_up:
@@ -585,8 +586,8 @@ class DhcpAgentWithStateReport(DhcpAgent):
 self.schedule_resync("Agent has just been revived")
 except AttributeError:
 # This means the server does not support report_state
-LOG.warn(_LW("Neutron server does not support state report."
-" State report for this agent will be disabled."))
+LOG.warning(_LW("Neutron server does not support state report. "
+"State report for this agent will be disabled."))
 self.heartbeat.stop()
 self.run()
 return
@@ -362,8 +362,8 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
 def _router_removed(self, router_id):
 ri = self.router_info.get(router_id)
 if ri is None:
-LOG.warn(_LW("Info for router %s was not found. "
-"Performing router cleanup"), router_id)
+LOG.warning(_LW("Info for router %s was not found. "
+"Performing router cleanup"), router_id)
 self.namespaces_manager.ensure_router_cleanup(router_id)
 return

@@ -683,8 +683,8 @@ class L3NATAgentWithStateReport(L3NATAgent):
 self.agent_state.pop('start_flag', None)
 except AttributeError:
 # This means the server does not support report_state
-LOG.warn(_LW("Neutron server does not support state report. "
-"State report for this agent will be disabled."))
+LOG.warning(_LW("Neutron server does not support state report. "
+"State report for this agent will be disabled."))
 self.heartbeat.stop()
 return
 except Exception:
@@ -209,10 +209,11 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
 return True
 else:
 if operation == 'add':
-LOG.warn(_LW("Device %s does not exist so ARP entry "
-"cannot be updated, will cache information "
-"to be applied later when the device exists"),
-device)
+LOG.warning(_LW("Device %s does not exist so ARP entry "
+"cannot be updated, will cache "
+"information to be applied later "
+"when the device exists"),
+device)
 self._cache_arp_entry(ip, mac, subnet_id, operation)
 return False
 except Exception:
@@ -265,8 +265,8 @@ class RouterInfo(object):
 except RuntimeError:
 # any exception occurred here should cause the floating IP
 # to be set in error state
-LOG.warn(_LW("Unable to configure IP address for "
-"floating IP: %s"), fip['id'])
+LOG.warning(_LW("Unable to configure IP address for "
+"floating IP: %s"), fip['id'])

 def add_floating_ip(self, fip, interface_name, device):
 raise NotImplementedError()
@@ -250,10 +250,10 @@ class IptablesTable(object):
 top, self.wrap_name,
 comment=comment)))
 except ValueError:
-LOG.warn(_LW('Tried to remove rule that was not there:'
-' %(chain)r %(rule)r %(wrap)r %(top)r'),
-{'chain': chain, 'rule': rule,
-'top': top, 'wrap': wrap})
+LOG.warning(_LW('Tried to remove rule that was not there:'
+' %(chain)r %(rule)r %(wrap)r %(top)r'),
+{'chain': chain, 'rule': rule,
+'top': top, 'wrap': wrap})

 def _get_chain_rules(self, chain, wrap):
 chain = get_chain_name(chain, wrap)
@@ -696,8 +696,8 @@ class IptablesManager(object):
 """Return the sum of the traffic counters of all rules of a chain."""
 cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap)
 if not cmd_tables:
-LOG.warn(_LW('Attempted to get traffic counters of chain %s which '
-'does not exist'), chain)
+LOG.warning(_LW('Attempted to get traffic counters of chain %s '
+'which does not exist'), chain)
 return

 name = get_chain_name(chain, wrap)
@@ -200,7 +200,7 @@ class MetadataProxyHandler(object):
 req.response.body = content
 return req.response
 elif resp.status == 403:
-LOG.warn(_LW(
+LOG.warning(_LW(
 'The remote metadata server responded with Forbidden. This '
 'response usually occurs when shared secrets do not match.'
 ))
@@ -215,7 +215,7 @@ class MetadataProxyHandler(object):
 msg = _(
 'Remote metadata server experienced an internal server error.'
 )
-LOG.warn(msg)
+LOG.warning(msg)
 explanation = six.text_type(msg)
 return webob.exc.HTTPInternalServerError(explanation=explanation)
 else:
@@ -267,8 +267,8 @@ class UnixDomainMetadataProxy(object):
 use_call=self.agent_state.get('start_flag'))
 except AttributeError:
 # This means the server does not support report_state
-LOG.warn(_LW('Neutron server does not support state report.'
-' State report for this agent will be disabled.'))
+LOG.warning(_LW('Neutron server does not support state report.'
+' State report for this agent will be disabled.'))
 self.heartbeat.stop()
 return
 except Exception:
@@ -118,7 +118,7 @@ class PluginApi(object):
 # may not work correctly, however it can function in 'degraded'
 # mode, in that DVR routers may not be in the system yet, and
 # it might be not necessary to retrieve info about the host.
-LOG.warn(_LW('DVR functionality requires a server upgrade.'))
+LOG.warning(_LW('DVR functionality requires a server upgrade.'))
 res = [
 self.get_device_details(context, device, agent_id, host)
 for device in devices
@@ -196,7 +196,8 @@ class PluginApi(object):
 res = cctxt.call(context, 'tunnel_sync', tunnel_ip=tunnel_ip,
 tunnel_type=tunnel_type, host=host)
 except oslo_messaging.UnsupportedVersion:
-LOG.warn(_LW('Tunnel synchronization requires a server upgrade.'))
+LOG.warning(_LW('Tunnel synchronization requires a '
+'server upgrade.'))
 cctxt = self.client.prepare()
 res = cctxt.call(context, 'tunnel_sync', tunnel_ip=tunnel_ip,
 tunnel_type=tunnel_type)
@@ -63,8 +63,8 @@ def _is_valid_driver_combination():

 def is_firewall_enabled():
 if not _is_valid_driver_combination():
-LOG.warn(_LW("Driver configuration doesn't match with "
-"enable_security_group"))
+LOG.warning(_LW("Driver configuration doesn't match with "
+"enable_security_group"))

 return cfg.CONF.SECURITYGROUP.enable_security_group

@@ -97,8 +97,8 @@ class SecurityGroupAgentRpc(object):
 firewall_driver = cfg.CONF.SECURITYGROUP.firewall_driver or 'noop'
 LOG.debug("Init firewall settings (driver=%s)", firewall_driver)
 if not _is_valid_driver_combination():
-LOG.warn(_LW("Driver configuration doesn't match "
-"with enable_security_group"))
+LOG.warning(_LW("Driver configuration doesn't match "
+"with enable_security_group"))
 firewall_class = firewall.load_firewall_driver_class(firewall_driver)
 try:
 self.firewall = firewall_class(
@@ -107,9 +107,9 @@ def _get_pagination_max_limit():
 if max_limit == 0:
 raise ValueError()
 except ValueError:
-LOG.warn(_LW("Invalid value for pagination_max_limit: %s. It "
-"should be an integer greater to 0"),
-cfg.CONF.pagination_max_limit)
+LOG.warning(_LW("Invalid value for pagination_max_limit: %s. It "
+"should be an integer greater to 0"),
+cfg.CONF.pagination_max_limit)
 return max_limit

@@ -532,17 +532,17 @@ class ExtensionManager(object):
 ext_name = mod_name[0].upper() + mod_name[1:]
 new_ext_class = getattr(mod, ext_name, None)
 if not new_ext_class:
-LOG.warn(_LW('Did not find expected name '
-'"%(ext_name)s" in %(file)s'),
-{'ext_name': ext_name,
-'file': ext_path})
+LOG.warning(_LW('Did not find expected name '
+'"%(ext_name)s" in %(file)s'),
+{'ext_name': ext_name,
+'file': ext_path})
 continue
 new_ext = new_ext_class()
 self.add_extension(new_ext)
 except Exception as exception:
-LOG.warn(_LW("Extension file %(f)s wasn't loaded due to "
-"%(exception)s"),
-{'f': f, 'exception': exception})
+LOG.warning(_LW("Extension file %(f)s wasn't loaded due to "
+"%(exception)s"),
+{'f': f, 'exception': exception})

 def add_extension(self, ext):
 # Do nothing if the extension doesn't check out
@@ -578,9 +578,9 @@ class PluginAwareExtensionManager(ExtensionManager):
 alias = extension.get_alias()
 supports_extension = alias in self.get_supported_extension_aliases()
 if not supports_extension:
-LOG.warn(_LW("Extension %s not supported by any of loaded "
-"plugins"),
-alias)
+LOG.warning(_LW("Extension %s not supported by any of loaded "
+"plugins"),
+alias)
 return supports_extension

 def _plugins_implement_interface(self, extension):
@@ -589,8 +589,9 @@ class PluginAwareExtensionManager(ExtensionManager):
 for plugin in self.plugins.values():
 if isinstance(plugin, extension.get_plugin_interface()):
 return True
-LOG.warn(_LW("Loaded plugins do not implement extension %s interface"),
-extension.get_alias())
+LOG.warning(_LW("Loaded plugins do not implement extension "
+"%s interface"),
+extension.get_alias())
 return False

 @classmethod
@@ -70,9 +70,10 @@ class DhcpAgentNotifyAPI(object):
 context, 'network_create_end',
 {'network': {'id': network['id']}}, agent['host'])
 elif not existing_agents:
-LOG.warn(_LW('Unable to schedule network %s: no agents available; '
-'will retry on subsequent port and subnet creation '
-'events.'), network['id'])
+LOG.warning(_LW('Unable to schedule network %s: no agents '
+'available; will retry on subsequent port '
+'and subnet creation events.'),
+network['id'])
 return new_agents + existing_agents

 def _get_enabled_agents(self, context, network, agents, method, payload):
@@ -87,12 +88,13 @@ class DhcpAgentNotifyAPI(object):
 len_enabled_agents = len(enabled_agents)
 len_active_agents = len(active_agents)
 if len_active_agents < len_enabled_agents:
-LOG.warn(_LW("Only %(active)d of %(total)d DHCP agents associated "
-"with network '%(net_id)s' are marked as active, so "
-"notifications may be sent to inactive agents."),
-{'active': len_active_agents,
-'total': len_enabled_agents,
-'net_id': network_id})
+LOG.warning(_LW("Only %(active)d of %(total)d DHCP agents "
+"associated with network '%(net_id)s' "
+"are marked as active, so notifications "
+"may be sent to inactive agents."),
+{'active': len_active_agents,
+'total': len_enabled_agents,
+'net_id': network_id})
 if not enabled_agents:
 num_ports = self.plugin.get_ports_count(
 context, {'network_id': [network_id]})
@@ -104,9 +104,9 @@ class DhcpRpcCallback(object):
 else:
 ctxt.reraise = True
 net_id = port['port']['network_id']
-LOG.warn(_LW("Action %(action)s for network %(net_id)s "
-"could not complete successfully: %(reason)s"),
-{"action": action, "net_id": net_id, 'reason': e})
+LOG.warning(_LW("Action %(action)s for network %(net_id)s "
+"could not complete successfully: %(reason)s"),
+{"action": action, "net_id": net_id, 'reason': e})

 def get_active_networks(self, context, **kwargs):
 """Retrieve and return a list of the active network ids."""
@@ -198,8 +198,8 @@ class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin):
 '%(host)s', {'agent_type': agent_type, 'host': host})
 return
 if self.is_agent_down(agent.heartbeat_timestamp):
-LOG.warn(_LW('%(agent_type)s agent %(agent_id)s is not active'),
-{'agent_type': agent_type, 'agent_id': agent.id})
+LOG.warning(_LW('%(agent_type)s agent %(agent_id)s is not active'),
+{'agent_type': agent_type, 'agent_id': agent.id})
 return agent

 @staticmethod
@@ -222,9 +222,9 @@ class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin):
 except Exception:
 msg = _LW('Dictionary %(dict_name)s for agent %(agent_type)s on '
 'host %(host)s is invalid.')
-LOG.warn(msg, {'dict_name': dict_name,
-'agent_type': agent_db.agent_type,
-'host': agent_db.host})
+LOG.warning(msg, {'dict_name': dict_name,
+'agent_type': agent_db.agent_type,
+'host': agent_db.host})
 conf = {}
 return conf

@@ -286,11 +286,11 @@ class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin):
 (agent['agent_type'],
 agent['heartbeat_timestamp'],
 agent['host']) for agent in dead_agents])
-LOG.warn(_LW("Agent healthcheck: found %(count)s dead agents "
-"out of %(total)s:\n%(data)s"),
-{'count': len(dead_agents),
-'total': len(agents),
-'data': data})
+LOG.warning(_LW("Agent healthcheck: found %(count)s dead agents "
+"out of %(total)s:\n%(data)s"),
+{'count': len(dead_agents),
+'total': len(agents),
+'data': data})
 else:
 LOG.debug("Agent healthcheck: found %s active agents",
 len(agents))
@@ -143,10 +143,11 @@ class AgentSchedulerDbMixin(agents_db.AgentDbMixin):
 tdelta = timeutils.utcnow() - getattr(self, '_clock_jump_canary',
 timeutils.utcnow())
 if tdelta.total_seconds() > cfg.CONF.agent_down_time:
-LOG.warn(_LW("Time since last %s agent reschedule check has "
-"exceeded the interval between checks. Waiting "
-"before check to allow agents to send a heartbeat "
-"in case there was a clock adjustment."), agent_type)
+LOG.warning(_LW("Time since last %s agent reschedule check has "
+"exceeded the interval between checks. Waiting "
+"before check to allow agents to send a heartbeat "
+"in case there was a clock adjustment."),
+agent_type)
 time.sleep(agent_dead_limit)
 self._clock_jump_canary = timeutils.utcnow()

@@ -282,17 +283,17 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler
 active_agents = [agent for agent in agents if
 self.is_eligible_agent(context, True, agent)]
 if not active_agents:
-LOG.warn(_LW("No DHCP agents available, "
-"skipping rescheduling"))
+LOG.warning(_LW("No DHCP agents available, "
+"skipping rescheduling"))
 return
 for binding in dead_bindings:
-LOG.warn(_LW("Removing network %(network)s from agent "
-"%(agent)s because the agent did not report "
-"to the server in the last %(dead_time)s "
-"seconds."),
-{'network': binding.network_id,
-'agent': binding.dhcp_agent_id,
-'dead_time': agent_dead_limit})
+LOG.warning(_LW("Removing network %(network)s from agent "
+"%(agent)s because the agent did not report "
+"to the server in the last %(dead_time)s "
+"seconds."),
+{'network': binding.network_id,
+'agent': binding.dhcp_agent_id,
+'dead_time': agent_dead_limit})
 # save binding object to avoid ObjectDeletedError
 # in case binding is concurrently deleted from the DB
 saved_binding = {'net': binding.network_id,
@@ -118,7 +118,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
 agents_back_online.add(binding.l3_agent_id)
 continue

-LOG.warn(_LW(
+LOG.warning(_LW(
 "Rescheduling router %(router)s from agent %(agent)s "
 "because the agent did not report to the server in "
 "the last %(dead_time)s seconds."),
@@ -375,8 +375,8 @@ class SecurityGroupServerRpcMixin(sg_db.SecurityGroupDbMixin):
 try:
 mac_address = query.one()[0]
 except (exc.NoResultFound, exc.MultipleResultsFound):
-LOG.warn(_LW('No valid gateway port on subnet %s is '
-'found for IPv6 RA'), subnet['id'])
+LOG.warning(_LW('No valid gateway port on subnet %s is '
+'found for IPv6 RA'), subnet['id'])
 return
 lla_ip = str(ipv6.get_ipv6_addr_by_EUI64(
 n_const.IPV6_LLA_PREFIX,
@@ -108,7 +108,7 @@ class NeutronDebugAgent(object):
 try:
 ip.netns.delete(namespace)
 except Exception:
-LOG.warn(_LW('Failed to delete namespace %s'), namespace)
+LOG.warning(_LW('Failed to delete namespace %s'), namespace)
 else:
 self.driver.unplug(self.driver.get_device_name(port),
 bridge=bridge)
@@ -35,7 +35,6 @@ _all_log_levels = {
 # a exception
 'error': '_LE',
 'info': '_LI',
-'warn': '_LW',
 'warning': '_LW',
 'critical': '_LC',
 'exception': '_LE',
@@ -55,6 +54,8 @@ log_translation_hint = re.compile(
 '|'.join('(?:%s)' % _regex_for_level(level, hint)
 for level, hint in six.iteritems(_all_log_levels)))

+log_warn = re.compile(
+r"(.)*LOG\.(warn)\(\s*('|\"|_)")
 contextlib_nested = re.compile(r"^with (contextlib\.)?nested\(")

@@ -218,6 +219,12 @@ def check_assertequal_for_httpcode(logical_line, filename):
 yield (0, msg)


+def check_log_warn_deprecated(logical_line, filename):
+msg = "N333: Use LOG.warning due to compatibility with py3"
+if log_warn.match(logical_line):
+yield (0, msg)
+
+
 def factory(register):
 register(validate_log_translations)
 register(use_jsonutils)
@@ -233,3 +240,4 @@ def factory(register):
 register(check_assertempty)
 register(check_assertisinstance)
 register(check_assertequal_for_httpcode)
+register(check_log_warn_deprecated)
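For reference, the N333 check added above can be exercised on its own. The harness below is illustrative only: the sample lines and file name are invented, while the regex and the check function are copied verbatim from the hunk above.

# Standalone sketch of the new N333 hacking check; the regex and check body
# come from the hunk above, the driver loop is only for demonstration.
import re

log_warn = re.compile(
    r"(.)*LOG\.(warn)\(\s*('|\"|_)")


def check_log_warn_deprecated(logical_line, filename):
    msg = "N333: Use LOG.warning due to compatibility with py3"
    if log_warn.match(logical_line):
        yield (0, msg)


samples = [
    'LOG.warn(_LW("Found failed openvswitch port: %s"), result)',
    'LOG.warning(_LW("Found failed openvswitch port: %s"), result)',
]
for line in samples:
    # The first sample is flagged, the second passes.
    print(line, "->", list(check_log_warn_deprecated(line, "example.py")))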
@@ -71,7 +71,7 @@ class ItemController(utils.NeutronPecanController):
 controller = manager.NeutronManager.get_controller_for_resource(
 collection)
 if not controller:
-LOG.warn(_LW("No controller found for: %s - returning response "
+LOG.warning(_LW("No controller found for: %s - returning response "
 "code 404"), collection)
 pecan.abort(404)
 return controller, remainder
@@ -93,8 +93,8 @@ class V2Controller(object):
 controller = manager.NeutronManager.get_controller_for_resource(
 collection)
 if not controller:
-LOG.warn(_LW("No controller found for: %s - returning response "
-"code 404"), collection)
+LOG.warning(_LW("No controller found for: %s - returning response "
+"code 404"), collection)
 pecan.abort(404)
 # Store resource and collection names in pecan request context so that
 # hooks can leverage them if necessary. The following code uses
@@ -52,7 +52,7 @@ def _plugin_for_resource(collection):
 hasattr(plugin, 'get_%s' % collection)):
 # This plugin implements this resource
 return plugin
-LOG.warn(_LW("No plugin found for:%s"), collection)
+LOG.warning(_LW("No plugin found for: %s"), collection)

 def _handle_plurals(collection):
@@ -127,15 +127,15 @@ def initialize_all():
 manager.NeutronManager.set_plugin_for_resource(
 resource, plugin)
 else:
-LOG.warn(_LW("No plugin found for resource:%s. API calls "
-"may not be correctly dispatched"), resource)
+LOG.warning(_LW("No plugin found for resource:%s. API calls "
+"may not be correctly dispatched"), resource)

 controller = pecan_controllers.get(collection)
 if not controller:
 LOG.debug("Building controller for resource:%s", resource)
 controller = res_ctrl.CollectionsController(collection, resource)
 else:
-LOG.debug("There are already controllers for resource:%s",
+LOG.debug("There are already controllers for resource: %s",
 resource)

 manager.NeutronManager.set_controller_for_resource(
@@ -24,10 +24,10 @@ LOG = logging.getLogger(__name__)
 # TODO(claudiub): Remove this module at the beginning of the O cycle.

 new_driver = 'hyperv.neutron.security_groups_driver.HyperVSecurityGroupsDriver'
-LOG.warn(_LW("You are using the deprecated firewall driver: %(deprecated)s. "
-"Use the recommended driver %(new)s instead."),
-{'deprecated': '%s.HyperVSecurityGroupsDriver' % __name__,
-'new': new_driver})
+LOG.warning(_LW("You are using the deprecated firewall driver: "
+"%(deprecated)s.Use the recommended driver %(new)s instead."),
+{'deprecated': '%s.HyperVSecurityGroupsDriver' % __name__,
+'new': new_driver})

 HyperVSecurityGroupsDriver = moves.moved_class(
 sg_driver.HyperVSecurityGroupsDriver,
@@ -326,7 +326,7 @@ class SriovNicSwitchAgent(object):
 self.ext_manager.delete_port(self.context, port)
 else:
 LOG.warning(_LW("port_id to device with MAC "
-"%s not found"), mac)
+"%s not found"), mac)
 dev_details = self.plugin_rpc.update_device_down(self.context,
 mac,
 self.agent_id,
@@ -138,8 +138,8 @@ class OpenFlowSwitchMixin(object):
 cookies = set([f.cookie for f in self.dump_flows()]) - \
 self.reserved_cookies
 for c in cookies:
-LOG.warn(_LW("Deleting flow with cookie 0x%(cookie)x") % {
-'cookie': c})
+LOG.warning(_LW("Deleting flow with cookie 0x%(cookie)x"),
+{'cookie': c})
 self.delete_flows(cookie=c, cookie_mask=((1 << 64) - 1))

 def install_goto_next(self, table_id):
@@ -1368,8 +1368,9 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
 # error condition of which operators should be aware
 port_needs_binding = True
 if not vif_port.ofport:
-LOG.warn(_LW("VIF port: %s has no ofport configured, "
-"and might not be able to transmit"), vif_port.vif_id)
+LOG.warning(_LW("VIF port: %s has no ofport configured, "
+"and might not be able to transmit"),
+vif_port.vif_id)
 if vif_port:
 if admin_state_up:
 port_needs_binding = self.port_bound(
@@ -1648,7 +1649,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
 try:
 return '%08x' % netaddr.IPAddress(ip_address, version=4)
 except Exception:
-LOG.warn(_LW("Invalid remote IP: %s"), ip_address)
+LOG.warning(_LW("Invalid remote IP: %s"), ip_address)
 return

 def tunnel_sync(self):
@@ -1701,11 +1702,11 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
 # Check for the canary flow
 status = self.int_br.check_canary_table()
 if status == constants.OVS_RESTARTED:
-LOG.warn(_LW("OVS is restarted. OVSNeutronAgent will reset "
-"bridges and recover ports."))
+LOG.warning(_LW("OVS is restarted. OVSNeutronAgent will reset "
+"bridges and recover ports."))
 elif status == constants.OVS_DEAD:
-LOG.warn(_LW("OVS is dead. OVSNeutronAgent will keep running "
-"and checking OVS status periodically."))
+LOG.warning(_LW("OVS is dead. OVSNeutronAgent will keep running "
+"and checking OVS status periodically."))
 return status

 def loop_count_and_wait(self, start_time, port_stats):
@@ -1760,7 +1761,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
 consecutive_resyncs = consecutive_resyncs + 1
 if (consecutive_resyncs >=
 constants.MAX_DEVICE_RETRIES):
-LOG.warn(_LW(
+LOG.warning(_LW(
 "Clearing cache of registered ports,"
 " retries to resync were > %s"),
 constants.MAX_DEVICE_RETRIES)
@@ -352,7 +352,7 @@ class MechanismManager(stevedore.named.NamedExtensionManager):
 else:
 # at least one of drivers does not support QoS, meaning
 # there are no rule types supported by all of them
-LOG.warn(
+LOG.warning(
 _LW("%s does not support QoS; "
 "no rule types available"),
 driver.name)
@@ -109,8 +109,9 @@ def _build_subattr_match_rule(attr_name, attr, action, target):
 validate = attr['validate']
 key = list(filter(lambda k: k.startswith('type:dict'), validate.keys()))
 if not key:
-LOG.warn(_LW("Unable to find data type descriptor for attribute %s"),
-attr_name)
+LOG.warning(_LW("Unable to find data type descriptor "
+"for attribute %s"),
+attr_name)
 return
 data = validate[key[0]]
 if not isinstance(data, dict):
@@ -212,7 +212,7 @@ class ResourceRegistry(object):

 def register_resource(self, resource):
 if resource.name in self._resources:
-LOG.warn(_LW('%s is already registered'), resource.name)
+LOG.warning(_LW('%s is already registered'), resource.name)
 if resource.name in self._tracked_resource_mappings:
 resource.register_events()
 self._resources[resource.name] = resource
@@ -59,7 +59,8 @@ class AutoScheduler(object):
 for dhcp_agent in dhcp_agents:
 if agents_db.AgentDbMixin.is_agent_down(
 dhcp_agent.heartbeat_timestamp):
-LOG.warn(_LW('DHCP agent %s is not active'), dhcp_agent.id)
+LOG.warning(_LW('DHCP agent %s is not active'),
+dhcp_agent.id)
 continue
 for net_id in net_ids:
 agents = plugin.get_dhcp_agents_hosting_networks(
@@ -207,7 +208,7 @@ class DhcpFilter(base_resource_filter.BaseResourceFilter):
 active_dhcp_agents = plugin.get_agents_db(
 context, filters=filters)
 if not active_dhcp_agents:
-LOG.warn(_LW('No more DHCP agents'))
+LOG.warning(_LW('No more DHCP agents'))
 return []
 return active_dhcp_agents
@@ -145,8 +145,8 @@ class L3Scheduler(object):
 target_routers = self._get_routers_can_schedule(
 context, plugin, unscheduled_routers, l3_agent)
 if not target_routers:
-LOG.warn(_LW('No routers compatible with L3 agent configuration '
-'on host %s'), host)
+LOG.warning(_LW('No routers compatible with L3 agent '
+'configuration on host %s'), host)
 return False

 self._bind_routers(context, plugin, target_routers, l3_agent)
@@ -170,14 +170,14 @@ class L3Scheduler(object):

 active_l3_agents = plugin.get_l3_agents(context, active=True)
 if not active_l3_agents:
-LOG.warn(_LW('No active L3 agents'))
+LOG.warning(_LW('No active L3 agents'))
 return []
 candidates = plugin.get_l3_agent_candidates(context,
 sync_router,
 active_l3_agents)
 if not candidates:
-LOG.warn(_LW('No L3 agents can host the router %s'),
-sync_router['id'])
+LOG.warning(_LW('No L3 agents can host the router %s'),
+sync_router['id'])

 return candidates

@@ -140,7 +140,7 @@ class BgpDrAgentSchedulerBase(BgpDrAgentFilter):

 if agents_db.AgentDbMixin.is_agent_down(
 bgp_dragent.heartbeat_timestamp):
-LOG.warn(_LW('BgpDrAgent %s is down'), bgp_dragent.id)
+LOG.warning(_LW('BgpDrAgent %s is down'), bgp_dragent.id)
 return False

 if self._is_bgp_speaker_hosted(context, bgp_dragent['id']):
@@ -276,8 +276,8 @@ class MeteringAgentWithStateReport(MeteringAgent):
 self.use_call = False
 except AttributeError:
 # This means the server does not support report_state
-LOG.warn(_LW("Neutron server does not support state report."
-" State report for this agent will be disabled."))
+LOG.warning(_LW("Neutron server does not support state report. "
+"State report for this agent will be disabled."))
 self.heartbeat.stop()
 return
 except Exception:
@@ -707,7 +707,7 @@ class TestDhcpAgentEventHandler(base.BaseTestCase):

 def test_enable_dhcp_helper_network_none(self):
 self.plugin.get_network_info.return_value = None
-with mock.patch.object(dhcp_agent.LOG, 'warn') as log:
+with mock.patch.object(dhcp_agent.LOG, 'warning') as log:
 self.dhcp.enable_dhcp_helper('fake_id')
 self.plugin.assert_has_calls(
 [mock.call.get_network_info('fake_id')])
@@ -927,7 +927,7 @@ class IptablesManagerStateFulTestCase(base.BaseTestCase):
 def test_remove_nonexistent_rule(self):
 with mock.patch.object(iptables_manager, "LOG") as log:
 self.iptables.ipv4['filter'].remove_rule('nonexistent', '-j DROP')
-log.warn.assert_called_once_with(
+log.warning.assert_called_once_with(
 'Tried to remove rule that was not there: '
 '%(chain)r %(rule)r %(wrap)r %(top)r',
 {'wrap': True, 'top': False, 'rule': '-j DROP',
@@ -1001,7 +1001,7 @@ class IptablesManagerStateFulTestCase(base.BaseTestCase):
 acc = self.iptables.get_traffic_counters('chain1')
 self.assertIsNone(acc)
 self.assertEqual(0, self.execute.call_count)
-log.warn.assert_called_once_with(
+log.warning.assert_called_once_with(
 'Attempted to get traffic counters of chain %s which '
 'does not exist', 'chain1')
@@ -51,7 +51,7 @@ class TestDhcpAgentNotifyAPI(base.BaseTestCase):
 new_agents = []
 self.assertEqual(new_agents + existing_agents, agents)
 self.assertEqual(expected_casts, self.mock_cast.call_count)
-self.assertEqual(expected_warnings, self.mock_log.warn.call_count)
+self.assertEqual(expected_warnings, self.mock_log.warning.call_count)

 def test__schedule_network(self):
 agent = agents_db.Agent()
@@ -86,7 +86,7 @@ class TestDhcpAgentNotifyAPI(base.BaseTestCase):
 if not cfg.CONF.enable_services_on_agents_with_admin_state_down:
 agents = [x for x in agents if x.admin_state_up]
 self.assertEqual(agents, enabled_agents)
-self.assertEqual(expected_warnings, self.mock_log.warn.call_count)
+self.assertEqual(expected_warnings, self.mock_log.warning.call_count)
 self.assertEqual(expected_errors, self.mock_log.error.call_count)

 def test__get_enabled_agents(self):
@@ -182,7 +182,7 @@ class TestAgentsDbMixin(TestAgentsDbBase):
 'alive': True}]
 with mock.patch.object(self.plugin, 'get_agents',
 return_value=agents),\
-mock.patch.object(agents_db.LOG, 'warn') as warn,\
+mock.patch.object(agents_db.LOG, 'warning') as warn,\
 mock.patch.object(agents_db.LOG, 'debug') as debug:
 self.plugin.agent_health_check()
 self.assertTrue(debug.called)
@@ -29,7 +29,6 @@ class HackingTestCase(base.BaseTestCase):
 expected_marks = {
 'error': '_LE',
 'info': '_LI',
-'warn': '_LW',
 'warning': '_LW',
 'critical': '_LC',
 'exception': '_LE',