Make code follow log translation guideline

Since Pike, log messages should not be translated. This patch removes
calls to the i18n markers _LC, _LI, _LE and _LW from logging logic
throughout the code. The translator definitions are removed from
neutron._i18n as well. This patch also removes the log translation
verification from the ignore directive in tox.ini.

Change-Id: If9aa76fcf121c0e61a7c08088006c5873faee56e
parent c65e541b6e
commit 7322bd6efb
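The change is mechanical throughout the diff below: each log call drops its level-specific translation marker and passes the plain format string straight to the logger, while user-facing strings (exception messages) keep the `_` marker. A minimal before/after sketch of the pattern; the logger name and the extension names are illustrative only, not taken from a specific hunk:

    import logging

    LOG = logging.getLogger(__name__)
    names = ['qos', 'fdb']  # illustrative values

    # Before: the message was wrapped in a level-specific marker:
    #     LOG.info(_LI("Loaded agent extensions: %s"), self.names())
    # After: the plain string is passed directly; the argument stays
    # separate so %-style interpolation remains lazy.
    LOG.info("Loaded agent extensions: %s", names)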
@@ -27,16 +27,6 @@ _C = _translators.contextual_form
 # The plural translation function using the name "_P"
 _P = _translators.plural_form
 
-# Translators for log levels.
-#
-# The abbreviated names are meant to reflect the usual use of a short
-# name like '_'. The "L" is for "log" and the other letter comes from
-# the level.
-_LI = _translators.log_info
-_LW = _translators.log_warning
-_LE = _translators.log_error
-_LC = _translators.log_critical
-
 
 def get_available_languages():
     return oslo_i18n.get_available_languages(DOMAIN)
@@ -13,7 +13,6 @@
 from oslo_log import log
 import stevedore
 
-from neutron._i18n import _LI
 from neutron.conf.agent import agent_extensions_manager as agent_ext_mgr_config
 
 LOG = log.getLogger(__name__)
@@ -28,7 +27,7 @@ class AgentExtensionsManager(stevedore.named.NamedExtensionManager):
         super(AgentExtensionsManager, self).__init__(
             namespace, conf.agent.extensions,
             invoke_on_load=True, name_order=True)
-        LOG.info(_LI("Loaded agent extensions: %s"), self.names())
+        LOG.info("Loaded agent extensions: %s", self.names())
 
     def initialize(self, connection, driver_type, agent_api=None):
         """Initialize enabled agent extensions.
@@ -44,7 +43,7 @@ class AgentExtensionsManager(stevedore.named.NamedExtensionManager):
         """
         # Initialize each agent extension in the list.
         for extension in self:
-            LOG.info(_LI("Initializing agent extension '%s'"), extension.name)
+            LOG.info("Initializing agent extension '%s'", extension.name)
             # If the agent has provided an agent_api object, this object will
             # be passed to all interested extensions. This object must be
             # consumed by each such extension before the extension's
@@ -27,7 +27,7 @@ from oslo_log import log as logging
 import six
 import tenacity
 
-from neutron._i18n import _, _LE, _LI, _LW
+from neutron._i18n import _
 from neutron.agent.common import ip_lib
 from neutron.agent.common import utils
 from neutron.agent.ovsdb import api as ovsdb_api
@@ -298,8 +298,8 @@ class OVSBridge(BaseOVS):
                           "in 1 second. Attempt: %s/10", i)
                 time.sleep(1)
                 continue
-            LOG.error(_LE("Unable to execute %(cmd)s. Exception: "
-                          "%(exception)s"),
+            LOG.error("Unable to execute %(cmd)s. Exception: "
+                      "%(exception)s",
                       {'cmd': full_args, 'exception': e})
             break
 
@@ -320,7 +320,7 @@ class OVSBridge(BaseOVS):
         try:
             ofport = self._get_port_val(port_name, "ofport")
         except tenacity.RetryError:
-            LOG.exception(_LE("Timed out retrieving ofport on port %s."),
+            LOG.exception("Timed out retrieving ofport on port %s.",
                           port_name)
         return ofport
 
@@ -330,7 +330,7 @@ class OVSBridge(BaseOVS):
         try:
             port_external_ids = self._get_port_val(port_name, "external_ids")
         except tenacity.RetryError:
-            LOG.exception(_LE("Timed out retrieving external_ids on port %s."),
+            LOG.exception("Timed out retrieving external_ids on port %s.",
                           port_name)
         return port_external_ids
 
@@ -526,10 +526,10 @@ class OVSBridge(BaseOVS):
                                         if_exists=True)
         for result in results:
             if result['ofport'] == UNASSIGNED_OFPORT:
-                LOG.warning(_LW("Found not yet ready openvswitch port: %s"),
+                LOG.warning("Found not yet ready openvswitch port: %s",
                             result['name'])
             elif result['ofport'] == INVALID_OFPORT:
-                LOG.warning(_LW("Found failed openvswitch port: %s"),
+                LOG.warning("Found failed openvswitch port: %s",
                             result['name'])
             elif 'attached-mac' in result['external_ids']:
                 port_id = self.portid_from_external_ids(result['external_ids'])
@@ -569,8 +569,8 @@ class OVSBridge(BaseOVS):
         for port_id in port_ids:
             result[port_id] = None
             if port_id not in by_id:
-                LOG.info(_LI("Port %(port_id)s not present in bridge "
-                             "%(br_name)s"),
+                LOG.info("Port %(port_id)s not present in bridge "
+                         "%(br_name)s",
                          {'port_id': port_id, 'br_name': self.br_name})
                 continue
             pinfo = by_id[port_id]
@@ -584,8 +584,8 @@ class OVSBridge(BaseOVS):
     @staticmethod
     def _check_ofport(port_id, port_info):
         if port_info['ofport'] in [UNASSIGNED_OFPORT, INVALID_OFPORT]:
-            LOG.warning(_LW("ofport: %(ofport)s for VIF: %(vif)s "
-                            "is not a positive integer"),
+            LOG.warning("ofport: %(ofport)s for VIF: %(vif)s "
+                        "is not a positive integer",
                         {'ofport': port_info['ofport'], 'vif': port_id})
             return False
         return True
@@ -602,7 +602,7 @@ class OVSBridge(BaseOVS):
                 continue
             mac = port['external_ids'].get('attached-mac')
             return VifPort(port['name'], port['ofport'], port_id, mac, self)
-        LOG.info(_LI("Port %(port_id)s not present in bridge %(br_name)s"),
+        LOG.info("Port %(port_id)s not present in bridge %(br_name)s",
                  {'port_id': port_id, 'br_name': self.br_name})
 
     def delete_ports(self, all_ports=False):
@@ -837,7 +837,7 @@ class DeferredOVSBridge(object):
         if exc_type is None:
             self.apply_flows()
         else:
-            LOG.exception(_LE("OVS flows could not be applied on bridge %s"),
+            LOG.exception("OVS flows could not be applied on bridge %s",
                           self.br.br_name)
 
 
@@ -19,7 +19,6 @@ from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_utils import timeutils
 
-from neutron._i18n import _LE
 from neutron.common import utils as neutron_utils
 from neutron.conf.agent import common as config
 from neutron.conf.agent.database import agents_db
@@ -53,7 +52,7 @@ def load_interface_driver(conf):
             INTERFACE_NAMESPACE, conf.interface_driver)
         return loaded_class(conf)
     except ImportError:
-        LOG.error(_LE("Error loading interface driver '%s'"),
+        LOG.error("Error loading interface driver '%s'",
                   conf.interface_driver)
         raise SystemExit(1)
 
@@ -29,7 +29,7 @@ from oslo_utils import fileutils
 from oslo_utils import importutils
 import six
 
-from neutron._i18n import _, _LE, _LI, _LW
+from neutron._i18n import _
 from neutron.agent.linux import dhcp
 from neutron.agent.linux import external_process
 from neutron.agent.metadata import driver as metadata_driver
@@ -120,7 +120,7 @@ class DhcpAgent(manager.Manager):
 
     def after_start(self):
         self.run()
-        LOG.info(_LI("DHCP agent started"))
+        LOG.info("DHCP agent started")
 
     def run(self):
         """Activate the DHCP agent."""
@@ -164,7 +164,7 @@ class DhcpAgent(manager.Manager):
                 or isinstance(e, exceptions.NetworkNotFound)):
             LOG.debug("Network %s has been deleted.", network.id)
         else:
-            LOG.exception(_LE('Unable to %(action)s dhcp for %(net_id)s.'),
+            LOG.exception('Unable to %(action)s dhcp for %(net_id)s.',
                           {'net_id': network.id, 'action': action})
 
     def schedule_resync(self, reason, network_id=None):
@@ -179,21 +179,21 @@ class DhcpAgent(manager.Manager):
         or 'None' is one of the networks, sync all of the networks.
         """
         only_nets = set([] if (not networks or None in networks) else networks)
-        LOG.info(_LI('Synchronizing state'))
+        LOG.info('Synchronizing state')
         pool = eventlet.GreenPool(self.conf.num_sync_threads)
         known_network_ids = set(self.cache.get_network_ids())
 
         try:
             active_networks = self.plugin_rpc.get_active_networks_info()
-            LOG.info(_LI('All active networks have been fetched through RPC.'))
+            LOG.info('All active networks have been fetched through RPC.')
             active_network_ids = set(network.id for network in active_networks)
             for deleted_id in known_network_ids - active_network_ids:
                 try:
                     self.disable_dhcp_helper(deleted_id)
                 except Exception as e:
                     self.schedule_resync(e, deleted_id)
-                    LOG.exception(_LE('Unable to sync network state on '
-                                      'deleted network %s'), deleted_id)
+                    LOG.exception('Unable to sync network state on '
+                                  'deleted network %s', deleted_id)
 
             for network in active_networks:
                 if (not only_nets or  # specifically resync all
@@ -204,7 +204,7 @@ class DhcpAgent(manager.Manager):
             # we notify all ports in case some were created while the agent
             # was down
             self.dhcp_ready_ports |= set(self.cache.get_port_ids(only_nets))
-            LOG.info(_LI('Synchronizing state complete'))
+            LOG.info('Synchronizing state complete')
 
         except Exception as e:
             if only_nets:
@@ -212,7 +212,7 @@ class DhcpAgent(manager.Manager):
                     self.schedule_resync(e, network_id)
             else:
                 self.schedule_resync(e)
-            LOG.exception(_LE('Unable to sync network state.'))
+            LOG.exception('Unable to sync network state.')
 
     def _dhcp_ready_ports_loop(self):
         """Notifies the server of any ports that had reservations setup."""
@@ -226,12 +226,12 @@ class DhcpAgent(manager.Manager):
                 self.plugin_rpc.dhcp_ready_on_ports(ports_to_send)
                 continue
             except oslo_messaging.MessagingTimeout:
-                LOG.error(_LE("Timeout notifying server of ports ready. "
-                              "Retrying..."))
+                LOG.error("Timeout notifying server of ports ready. "
+                          "Retrying...")
             except Exception:
-                LOG.exception(_LE("Failure notifying DHCP server of "
-                                  "ready DHCP ports. Will retry on next "
-                                  "iteration."))
+                LOG.exception("Failure notifying DHCP server of "
+                              "ready DHCP ports. Will retry on next "
+                              "iteration.")
             self.dhcp_ready_ports |= ports_to_send
 
     def start_ready_ports_loop(self):
@@ -267,7 +267,7 @@ class DhcpAgent(manager.Manager):
             return network
         except Exception as e:
             self.schedule_resync(e, network_id)
-            LOG.exception(_LE('Network %s info call failed.'), network_id)
+            LOG.exception('Network %s info call failed.', network_id)
 
     def enable_dhcp_helper(self, network_id):
         """Enable DHCP for a network that meets enabling criteria."""
@@ -279,12 +279,12 @@ class DhcpAgent(manager.Manager):
     def safe_configure_dhcp_for_network(self, network):
         try:
             network_id = network.get('id')
-            LOG.info(_LI('Starting network %s dhcp configuration'), network_id)
+            LOG.info('Starting network %s dhcp configuration', network_id)
             self.configure_dhcp_for_network(network)
-            LOG.info(_LI('Finished network %s dhcp configuration'), network_id)
+            LOG.info('Finished network %s dhcp configuration', network_id)
         except (exceptions.NetworkNotFound, RuntimeError):
-            LOG.warning(_LW('Network %s may have been deleted and '
-                            'its resources may have already been disposed.'),
+            LOG.warning('Network %s may have been deleted and '
+                        'its resources may have already been disposed.',
                         network.id)
 
     def configure_dhcp_for_network(self, network):
@@ -411,7 +411,7 @@ class DhcpAgent(manager.Manager):
         network = self.cache.get_network_by_id(updated_port.network_id)
         if not network:
             return
-        LOG.info(_LI("Trigger reload_allocations for port %s"),
+        LOG.info("Trigger reload_allocations for port %s",
                  updated_port)
         driver_action = 'reload_allocations'
         if self._is_port_on_this_agent(updated_port):
@@ -498,10 +498,10 @@ class DhcpAgent(manager.Manager):
         if router_ports:
             # Multiple router ports should not be allowed
             if len(router_ports) > 1:
-                LOG.warning(_LW("%(port_num)d router ports found on the "
-                                "metadata access network. Only the port "
-                                "%(port_id)s, for router %(router_id)s "
-                                "will be considered"),
+                LOG.warning("%(port_num)d router ports found on the "
+                            "metadata access network. Only the port "
+                            "%(port_id)s, for router %(router_id)s "
+                            "will be considered",
                             {'port_num': len(router_ports),
                              'port_id': router_ports[0].id,
                              'router_id': router_ports[0].device_id})
@@ -733,18 +733,18 @@ class DhcpAgentWithStateReport(DhcpAgent):
             agent_status = self.state_rpc.report_state(
                 ctx, self.agent_state, True)
             if agent_status == n_const.AGENT_REVIVED:
-                LOG.info(_LI("Agent has just been revived. "
-                             "Scheduling full sync"))
+                LOG.info("Agent has just been revived. "
+                         "Scheduling full sync")
                 self.schedule_resync("Agent has just been revived")
         except AttributeError:
             # This means the server does not support report_state
-            LOG.warning(_LW("Neutron server does not support state report. "
-                            "State report for this agent will be disabled."))
+            LOG.warning("Neutron server does not support state report. "
+                        "State report for this agent will be disabled.")
             self.heartbeat.stop()
             self.run()
             return
         except Exception:
-            LOG.exception(_LE("Failed reporting state!"))
+            LOG.exception("Failed reporting state!")
             return
         if self.agent_state.pop('start_flag', None):
             self.run()
@@ -753,7 +753,7 @@ class DhcpAgentWithStateReport(DhcpAgent):
         """Handle the agent_updated notification event."""
         self.schedule_resync(_("Agent updated: %(payload)s") %
                              {"payload": payload})
-        LOG.info(_LI("agent_updated by server side %s!"), payload)
+        LOG.info("agent_updated by server side %s!", payload)
 
     def after_start(self):
-        LOG.info(_LI("DHCP agent started"))
+        LOG.info("DHCP agent started")
@@ -20,7 +20,6 @@ from neutron_lib.utils import helpers
 from oslo_config import cfg
 from oslo_log import log as logging
 
-from neutron._i18n import _LE, _LW
 from neutron.agent.l2 import l2_agent_extension
 from neutron.agent.linux import bridge_lib
 from neutron.conf.agent import l2_ext_fdb_population
@@ -73,9 +72,9 @@ class FdbPopulationAgentExtension(
             try:
                 _stdout = bridge_lib.FdbInterface.show(device)
             except RuntimeError as e:
-                LOG.warning(_LW(
-                    'Unable to find FDB Interface %(device)s. '
-                    'Exception: %(e)s'), {'device': device, 'e': e})
+                LOG.warning(
+                    'Unable to find FDB Interface %(device)s. '
+                    'Exception: %(e)s', {'device': device, 'e': e})
                 continue
             self.device_to_macs[device] = _stdout.split()[::3]
 
@@ -94,10 +93,10 @@ class FdbPopulationAgentExtension(
             try:
                 bridge_lib.FdbInterface.add(mac, device)
             except RuntimeError as e:
-                LOG.warning(_LW(
-                    'Unable to add mac %(mac)s '
-                    'to FDB Interface %(device)s. '
-                    'Exception: %(e)s'),
+                LOG.warning(
+                    'Unable to add mac %(mac)s '
+                    'to FDB Interface %(device)s. '
+                    'Exception: %(e)s',
                     {'mac': mac, 'device': device, 'e': e})
                 return
             self.device_to_macs[device].append(mac)
@@ -105,19 +104,19 @@ class FdbPopulationAgentExtension(
         def delete_port(self, devices, port_id):
             mac = self.portid_to_mac.get(port_id)
             if mac is None:
-                LOG.warning(_LW('Port Id %(port_id)s does not have a rule for '
-                                'devices %(devices)s in FDB table'),
+                LOG.warning('Port Id %(port_id)s does not have a rule for '
+                            'devices %(devices)s in FDB table',
                             {'port_id': port_id, 'devices': devices})
                 return
             for device in devices:
                 if mac in self.device_to_macs[device]:
                     try:
                         bridge_lib.FdbInterface.delete(mac, device)
                     except RuntimeError as e:
-                        LOG.warning(_LW(
-                            'Unable to delete mac %(mac)s '
-                            'from FDB Interface %(device)s. '
-                            'Exception: %(e)s'),
+                        LOG.warning(
+                            'Unable to delete mac %(mac)s '
+                            'from FDB Interface %(device)s. '
+                            'Exception: %(e)s',
                             {'mac': mac, 'device': device, 'e': e})
                         return
                     self.device_to_macs[device].remove(mac)
@@ -129,17 +128,17 @@ class FdbPopulationAgentExtension(
         valid_driver_types = (linux_bridge_constants.EXTENSION_DRIVER_TYPE,
                               ovs_constants.EXTENSION_DRIVER_TYPE)
         if driver_type not in valid_driver_types:
-            LOG.error(_LE('FDB extension is only supported for OVS and '
-                          'linux bridge agent, currently uses '
-                          '%(driver_type)s'), {'driver_type': driver_type})
+            LOG.error('FDB extension is only supported for OVS and '
+                      'linux bridge agent, currently uses '
+                      '%(driver_type)s', {'driver_type': driver_type})
             sys.exit(1)
 
         self.device_mappings = helpers.parse_mappings(
             cfg.CONF.FDB.shared_physical_device_mappings, unique_keys=False)
         devices = self._get_devices()
         if not devices:
-            LOG.error(_LE('Invalid configuration provided for FDB extension: '
-                          'no physical devices'))
+            LOG.error('Invalid configuration provided for FDB extension: '
+                      'no physical devices')
             sys.exit(1)
         self.fdb_tracker = self.FdbTableTracker(devices)
 
@@ -20,7 +20,6 @@ from oslo_concurrency import lockutils
 from oslo_log import log as logging
 import six
 
-from neutron._i18n import _LW, _LI
 from neutron.agent.l2 import l2_agent_extension
 from neutron.api.rpc.callbacks.consumer import registry
 from neutron.api.rpc.callbacks import events
@@ -107,8 +106,8 @@ class QosAgentDriver(object):
             if rule_type in self.SUPPORTED_RULES:
                 yield rule
             else:
-                LOG.warning(_LW('Unsupported QoS rule type for %(rule_id)s: '
-                                '%(rule_type)s; skipping'),
+                LOG.warning('Unsupported QoS rule type for %(rule_id)s: '
+                            '%(rule_type)s; skipping',
                             {'rule_id': rule.id, 'rule_type': rule_type})
 
     def _handle_rule_delete(self, port, rule_type, ingress=False):
@@ -261,9 +260,9 @@ class QosAgentExtension(l2_agent_extension.L2AgentExtension):
         qos_policy = self.resource_rpc.pull(
             context, resources.QOS_POLICY, qos_policy_id)
         if qos_policy is None:
-            LOG.info(_LI("QoS policy %(qos_policy_id)s applied to port "
-                         "%(port_id)s is not available on server, "
-                         "it has been deleted. Skipping."),
+            LOG.info("QoS policy %(qos_policy_id)s applied to port "
+                     "%(port_id)s is not available on server, "
+                     "it has been deleted. Skipping.",
                      {'qos_policy_id': qos_policy_id, 'port_id': port_id})
             self._process_reset_port(port)
         else:
@@ -12,7 +12,6 @@
 
 from oslo_log import log
 
-from neutron._i18n import _LE
 from neutron.agent import agent_extensions_manager as agent_ext_manager
 from neutron.conf.agent import agent_extensions_manager as agent_ext_mgr_config
 
@@ -43,8 +42,8 @@ class L2AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager):
                 extension.obj.handle_port(context, data)
             else:
                 LOG.error(
-                    _LE("Agent Extension '%(name)s' does not "
-                        "implement method handle_port"),
+                    "Agent Extension '%(name)s' does not "
+                    "implement method handle_port",
                     {'name': extension.name}
                 )
 
@@ -55,7 +54,7 @@ class L2AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager):
                 extension.obj.delete_port(context, data)
             else:
                 LOG.error(
-                    _LE("Agent Extension '%(name)s' does not "
-                        "implement method delete_port"),
+                    "Agent Extension '%(name)s' does not "
+                    "implement method delete_port",
                     {'name': extension.name}
                 )
@@ -33,7 +33,7 @@ from oslo_utils import excutils
 from oslo_utils import timeutils
 from osprofiler import profiler
 
-from neutron._i18n import _, _LE, _LI, _LW
+from neutron._i18n import _
 from neutron.agent.common import utils as common_utils
 from neutron.agent.l3 import dvr
 from neutron.agent.l3 import dvr_edge_ha_router
@@ -221,20 +221,20 @@ class L3NATAgent(ha.AgentMixin,
                 self.neutron_service_plugins = (
                     self.plugin_rpc.get_service_plugin_list(self.context))
             except oslo_messaging.RemoteError as e:
-                LOG.warning(_LW('l3-agent cannot check service plugins '
-                                'enabled at the neutron server when '
-                                'startup due to RPC error. It happens '
-                                'when the server does not support this '
-                                'RPC API. If the error is '
-                                'UnsupportedVersion you can ignore this '
-                                'warning. Detail message: %s'), e)
+                LOG.warning('l3-agent cannot check service plugins '
+                            'enabled at the neutron server when '
+                            'startup due to RPC error. It happens '
+                            'when the server does not support this '
+                            'RPC API. If the error is '
+                            'UnsupportedVersion you can ignore this '
+                            'warning. Detail message: %s', e)
                 self.neutron_service_plugins = None
             except oslo_messaging.MessagingTimeout as e:
-                LOG.warning(_LW('l3-agent cannot contact neutron server '
-                                'to retrieve service plugins enabled. '
-                                'Check connectivity to neutron server. '
-                                'Retrying... '
-                                'Detailed message: %(msg)s.'), {'msg': e})
+                LOG.warning('l3-agent cannot contact neutron server '
+                            'to retrieve service plugins enabled. '
+                            'Check connectivity to neutron server. '
+                            'Retrying... '
+                            'Detailed message: %(msg)s.', {'msg': e})
                 continue
             break
 
@@ -272,15 +272,15 @@ class L3NATAgent(ha.AgentMixin,
         The actual values are not verified for correctness.
        """
         if not self.conf.interface_driver:
-            msg = _LE('An interface driver must be specified')
+            msg = 'An interface driver must be specified'
             LOG.error(msg)
             raise SystemExit(1)
 
         if self.conf.ipv6_gateway:
             # ipv6_gateway configured. Check for valid v6 link-local address.
             try:
-                msg = _LE("%s used in config as ipv6_gateway is not a valid "
-                          "IPv6 link-local address."),
+                msg = ("%s used in config as ipv6_gateway is not a valid "
+                       "IPv6 link-local address.")
                 ip_addr = netaddr.IPAddress(self.conf.ipv6_gateway)
                 if ip_addr.version != 6 or not ip_addr.is_link_local():
                     LOG.error(msg, self.conf.ipv6_gateway)
@@ -361,13 +361,13 @@ class L3NATAgent(ha.AgentMixin,
         except Exception:
             with excutils.save_and_reraise_exception():
                 del self.router_info[router_id]
-                LOG.exception(_LE('Error while initializing router %s'),
+                LOG.exception('Error while initializing router %s',
                               router_id)
                 self.namespaces_manager.ensure_router_cleanup(router_id)
                 try:
                     ri.delete()
                 except Exception:
-                    LOG.exception(_LE('Error while deleting router %s'),
+                    LOG.exception('Error while deleting router %s',
                                   router_id)
 
     def _safe_router_removed(self, router_id):
@@ -377,7 +377,7 @@ class L3NATAgent(ha.AgentMixin,
             self._router_removed(router_id)
             self.l3_ext_manager.delete_router(self.context, router_id)
         except Exception:
-            LOG.exception(_LE('Error while deleting router %s'), router_id)
+            LOG.exception('Error while deleting router %s', router_id)
             return False
         else:
             return True
@@ -385,8 +385,8 @@ class L3NATAgent(ha.AgentMixin,
     def _router_removed(self, router_id):
         ri = self.router_info.get(router_id)
         if ri is None:
-            LOG.warning(_LW("Info for router %s was not found. "
-                            "Performing router cleanup"), router_id)
+            LOG.warning("Info for router %s was not found. "
+                        "Performing router cleanup", router_id)
             self.namespaces_manager.ensure_router_cleanup(router_id)
             return
 
@@ -451,7 +451,7 @@ class L3NATAgent(ha.AgentMixin,
     def _process_router_if_compatible(self, router):
         if (self.conf.external_network_bridge and
                 not ip_lib.device_exists(self.conf.external_network_bridge)):
-            LOG.error(_LE("The external network bridge '%s' does not exist"),
+            LOG.error("The external network bridge '%s' does not exist",
                       self.conf.external_network_bridge)
             return
 
@@ -513,7 +513,7 @@ class L3NATAgent(ha.AgentMixin,
                 routers = self.plugin_rpc.get_routers(self.context,
                                                       [update.id])
             except Exception:
-                msg = _LE("Failed to fetch router information for '%s'")
+                msg = "Failed to fetch router information for '%s'"
                 LOG.exception(msg, update.id)
                 self._resync_router(update)
                 continue
@@ -540,12 +540,12 @@ class L3NATAgent(ha.AgentMixin,
                     log_verbose_exc(e.msg, router)
                     # Was the router previously handled by this agent?
                     if router['id'] in self.router_info:
-                        LOG.error(_LE("Removing incompatible router '%s'"),
+                        LOG.error("Removing incompatible router '%s'",
                                   router['id'])
                         self._safe_router_removed(router['id'])
                 except Exception:
                     log_verbose_exc(
-                        _LE("Failed to process compatible router: %s") % update.id,
+                        "Failed to process compatible router: %s" % update.id,
                         router)
                     self._resync_router(update)
                     continue
@@ -632,20 +632,20 @@ class L3NATAgent(ha.AgentMixin,
                     self.sync_routers_chunk_size = max(
                         self.sync_routers_chunk_size / 2,
                         SYNC_ROUTERS_MIN_CHUNK_SIZE)
-                    LOG.error(_LE('Server failed to return info for routers in '
-                                  'required time, decreasing chunk size to: %s'),
+                    LOG.error('Server failed to return info for routers in '
+                              'required time, decreasing chunk size to: %s',
                               self.sync_routers_chunk_size)
                 else:
-                    LOG.error(_LE('Server failed to return info for routers in '
-                                  'required time even with min chunk size: %s. '
-                                  'It might be under very high load or '
-                                  'just inoperable'),
+                    LOG.error('Server failed to return info for routers in '
+                              'required time even with min chunk size: %s. '
+                              'It might be under very high load or '
+                              'just inoperable',
                               self.sync_routers_chunk_size)
                     raise
             except oslo_messaging.MessagingException:
                 failed_routers = chunk or router_ids
-                LOG.exception(_LE("Failed synchronizing routers '%s' "
-                                  "due to RPC error"), failed_routers)
+                LOG.exception("Failed synchronizing routers '%s' "
+                              "due to RPC error", failed_routers)
                 raise n_exc.AbortSyncRouters()
 
         self.fullsync = False
@@ -679,7 +679,7 @@ class L3NATAgent(ha.AgentMixin,
         # can have L3NATAgentWithStateReport as its base class instead of
        # L3NATAgent.
         eventlet.spawn_n(self._process_routers_loop)
-        LOG.info(_LI("L3 agent started"))
+        LOG.info("L3 agent started")
 
     def create_pd_router_update(self):
         router_id = None
@@ -741,22 +741,22 @@ class L3NATAgentWithStateReport(L3NATAgent):
                                                        self.agent_state,
                                                        True)
             if agent_status == l3_constants.AGENT_REVIVED:
-                LOG.info(_LI('Agent has just been revived. '
-                             'Doing a full sync.'))
+                LOG.info('Agent has just been revived. '
+                         'Doing a full sync.')
                 self.fullsync = True
             self.agent_state.pop('start_flag', None)
         except AttributeError:
             # This means the server does not support report_state
-            LOG.warning(_LW("Neutron server does not support state report. "
-                            "State report for this agent will be disabled."))
+            LOG.warning("Neutron server does not support state report. "
+                        "State report for this agent will be disabled.")
             self.heartbeat.stop()
             return
         except Exception:
-            LOG.exception(_LE("Failed reporting state!"))
+            LOG.exception("Failed reporting state!")
 
     def after_start(self):
         eventlet.spawn_n(self._process_routers_loop)
-        LOG.info(_LI("L3 agent started"))
+        LOG.info("L3 agent started")
         # Do the report state before we do the first full sync.
         self._report_state()
 
@@ -765,4 +765,4 @@ class L3NATAgentWithStateReport(L3NATAgent):
     def agent_updated(self, context, payload):
         """Handle the agent_updated notification event."""
         self.fullsync = True
-        LOG.info(_LI("agent_updated by server side %s!"), payload)
+        LOG.info("agent_updated by server side %s!", payload)
@@ -15,7 +15,6 @@
 from neutron_lib import constants as lib_constants
 from oslo_log import log as logging
 
-from neutron._i18n import _LE
 from neutron.agent.l3 import dvr_local_router
 from neutron.agent.l3 import dvr_snat_ns
 from neutron.agent.l3 import router_info as router
@@ -211,8 +210,8 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
             super(DvrEdgeRouter, self)._update_routing_table(
                 operation, route, namespace=ns_name)
         else:
-            LOG.error(_LE("The SNAT namespace %s does not exist for "
-                          "the router."), ns_name)
+            LOG.error("The SNAT namespace %s does not exist for "
+                      "the router.", ns_name)
         super(DvrEdgeRouter, self).update_routing_table(operation, route)
 
     def delete(self):
@@ -20,7 +20,7 @@ from oslo_concurrency import lockutils
 from oslo_log import log as logging
 from oslo_utils import excutils
 
-from neutron._i18n import _, _LE, _LW
+from neutron._i18n import _
 from neutron.agent.l3 import fip_rule_priority_allocator as frpa
 from neutron.agent.l3 import link_local_allocator as lla
 from neutron.agent.l3 import namespaces
@@ -117,8 +117,8 @@ class FipNamespace(namespaces.Namespace):
             yield
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE('DVR: FIP namespace config failure '
-                              'for interface %s'), interface_name)
+                LOG.error('DVR: FIP namespace config failure '
+                          'for interface %s', interface_name)
 
     def create_or_update_gateway_port(self, agent_gateway_port):
         interface_name = self.get_ext_device_name(agent_gateway_port['id'])
@@ -147,8 +147,8 @@ class FipNamespace(namespaces.Namespace):
             with excutils.save_and_reraise_exception():
                 self.unsubscribe(agent_gateway_port['network_id'])
                 self.delete()
-                LOG.exception(_LE('DVR: Gateway update in '
-                                  'FIP namespace failed'))
+                LOG.exception('DVR: Gateway update in '
+                              'FIP namespace failed')
 
     def _create_gateway_port(self, ex_gw_port, interface_name):
         """Create namespace, request port creationg from Plugin,
@@ -296,8 +296,8 @@ class FipNamespace(namespaces.Namespace):
             with excutils.save_and_reraise_exception():
                 self.unsubscribe(self.agent_gateway_port['network_id'])
                 self.agent_gateway_port = None
-                LOG.exception(_LE('DVR: Gateway setup in FIP namespace '
-                                  'failed'))
+                LOG.exception('DVR: Gateway setup in FIP namespace '
+                              'failed')
 
         # Now add the filter match rule for the table.
         ip_rule = ip_lib.IPRule(namespace=self.get_name())
@@ -328,10 +328,10 @@ class FipNamespace(namespaces.Namespace):
         # throw exceptions. Unsubscribe this external network so that
         # the next call will trigger the interface to be plugged.
         if not ipd.exists():
-            LOG.warning(_LW('DVR: FIP gateway port with interface '
-                            'name: %(device)s does not exist in the given '
-                            'namespace: %(ns)s'), {'device': interface_name,
-                                                   'ns': ns_name})
+            LOG.warning('DVR: FIP gateway port with interface '
+                        'name: %(device)s does not exist in the given '
+                        'namespace: %(ns)s', {'device': interface_name,
+                                              'ns': ns_name})
             msg = _('DVR: Gateway update route in FIP namespace failed, retry '
                     'should be attempted on next call')
             raise n_exc.FloatingIpSetupException(msg)
@@ -22,7 +22,6 @@ from oslo_log import log as logging
 from oslo_utils import excutils
 import six
 
-from neutron._i18n import _LE, _LW
 from neutron.agent.l3 import dvr_fip_ns
 from neutron.agent.l3 import dvr_router_base
 from neutron.agent.linux import ip_lib
@@ -239,16 +238,16 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
                 return True
             else:
                 if operation == 'add':
-                    LOG.warning(_LW("Device %s does not exist so ARP entry "
-                                    "cannot be updated, will cache "
-                                    "information to be applied later "
-                                    "when the device exists"),
+                    LOG.warning("Device %s does not exist so ARP entry "
+                                "cannot be updated, will cache "
+                                "information to be applied later "
+                                "when the device exists",
                                 device)
                     self._cache_arp_entry(ip, mac, subnet_id, operation)
                 return False
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("DVR: Failed updating arp entry"))
+                LOG.exception("DVR: Failed updating arp entry")
 
     def _set_subnet_arp_info(self, subnet_id):
         """Set ARP info retrieved from Plugin for existing ports."""
@@ -356,10 +355,10 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
                                       priority=snat_idx)
         except Exception:
             if is_add:
-                exc = _LE('DVR: error adding redirection logic')
+                exc = 'DVR: error adding redirection logic'
             else:
-                exc = _LE('DVR: snat remove failed to clear the rule '
-                          'and device')
+                exc = ('DVR: snat remove failed to clear the rule '
+                       'and device')
             LOG.exception(exc)
 
     def _snat_redirect_add(self, gateway, sn_port, sn_int):
@@ -12,7 +12,6 @@
 
 from oslo_log import log as logging
 
-from neutron._i18n import _LE
 from neutron.agent.l3 import router_info as router
 from neutron.common import constants as l3_constants
 
@@ -47,8 +46,8 @@ class DvrRouterBase(router.RouterInfo):
             if ip['subnet_id'] in subnet_ids:
                 return p
 
-        LOG.error(_LE('DVR: SNAT port not found in the list '
-                      '%(snat_list)s for the given router '
-                      'internal port %(int_p)s'), {
+        LOG.error('DVR: SNAT port not found in the list '
+                  '%(snat_list)s for the given router '
+                  'internal port %(int_p)s', {
                       'snat_list': snat_ports,
                       'int_p': int_port})
@@ -20,7 +20,6 @@ from oslo_log import log as logging
 from oslo_utils import fileutils
 import webob
 
-from neutron._i18n import _LI
 from neutron.agent.linux import utils as agent_utils
 from neutron.common import constants
 from neutron.notifiers import batch_notifier
@@ -88,8 +87,8 @@ class AgentMixin(object):
         try:
             return self.router_info[router_id]
         except KeyError:
-            LOG.info(_LI('Router %s is not managed by this agent. It was '
-                         'possibly deleted concurrently.'), router_id)
+            LOG.info('Router %s is not managed by this agent. It was '
+                     'possibly deleted concurrently.', router_id)
 
     def check_ha_state_for_router(self, router_id, current_state):
         ri = self._get_router_info(router_id)
@@ -110,7 +109,7 @@ class AgentMixin(object):
         return self.conf.ha_vrrp_advert_int
 
     def enqueue_state_change(self, router_id, state):
-        LOG.info(_LI('Router %(router_id)s transitioned to %(state)s'),
+        LOG.info('Router %(router_id)s transitioned to %(state)s',
                  {'router_id': router_id,
                   'state': state})
 
@@ -21,7 +21,6 @@ from neutron_lib.api.definitions import portbindings
 from neutron_lib import constants as n_consts
 from oslo_log import log as logging
 
-from neutron._i18n import _, _LE
 from neutron.agent.l3 import namespaces
 from neutron.agent.l3 import router_info as router
 from neutron.agent.linux import external_process
@@ -93,7 +92,7 @@ class HaRouter(router.RouterInfo):
             with open(ha_state_path, 'w') as f:
                 f.write(new_state)
         except (OSError, IOError):
-            LOG.error(_LE('Error while writing HA state for %s'),
+            LOG.error('Error while writing HA state for %s',
                       self.router_id)
 
     @property
@@ -112,8 +111,8 @@ class HaRouter(router.RouterInfo):
     def initialize(self, process_monitor):
         ha_port = self.router.get(n_consts.HA_INTERFACE_KEY)
         if not ha_port:
-            msg = _("Unable to process HA router %s without "
-                    "HA port") % self.router_id
+            msg = ("Unable to process HA router %s without HA port" %
+                   self.router_id)
             LOG.exception(msg)
             raise Exception(msg)
         super(HaRouter, self).initialize(process_monitor)
@@ -16,7 +16,6 @@ import os
 
 from oslo_log import log as logging
 
-from neutron._i18n import _LW
 
 LOG = logging.getLogger(__name__)
 
@@ -55,8 +54,8 @@ class ItemAllocator(object):
                 self.remembered[key] = self.ItemClass(saved_value)
             except ValueError:
                 read_error = True
-                LOG.warning(_LW("Invalid line in %(file)s, "
-                                "ignoring: %(line)s"),
+                LOG.warning("Invalid line in %(file)s, "
+                            "ignoring: %(line)s",
                             {'file': state_file, 'line': line})
 
         self.pool.difference_update(self.remembered.values())
@@ -21,7 +21,7 @@ import netaddr
 from oslo_config import cfg
 from oslo_log import log as logging
 
-from neutron._i18n import _, _LE
+from neutron._i18n import _
 from neutron.agent.l3 import ha
 from neutron.agent.linux import daemon
 from neutron.agent.linux import ip_lib
@@ -86,8 +86,8 @@ class MonitorDaemon(daemon.Daemon):
                 # Remove this code once new keepalived versions are available.
                 self.send_garp(event)
         except Exception:
-            LOG.exception(_LE(
-                'Failed to process or handle event for line %s'), iterable)
+            LOG.exception('Failed to process or handle event for line %s',
                          iterable)
 
     def write_state_change(self, state):
         with open(os.path.join(
@@ -15,7 +15,6 @@
 
 from oslo_log import log
 
-from neutron._i18n import _LE
 from neutron.agent import agent_extensions_manager as agent_ext_manager
 from neutron.conf.agent import agent_extensions_manager as agent_ext_mgr_config
 
@@ -43,8 +42,8 @@ class L3AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager):
                 extension.obj.add_router(context, data)
             else:
                 LOG.error(
-                    _LE("Agent Extension '%(name)s' does not "
-                        "implement method add_router"),
+                    "Agent Extension '%(name)s' does not "
+                    "implement method add_router",
                     {'name': extension.name}
                 )
 
@@ -55,8 +54,8 @@ class L3AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager):
                 extension.obj.update_router(context, data)
             else:
                 LOG.error(
-                    _LE("Agent Extension '%(name)s' does not "
-                        "implement method update_router"),
+                    "Agent Extension '%(name)s' does not "
+                    "implement method update_router",
                     {'name': extension.name}
                 )
 
@@ -67,7 +66,7 @@ class L3AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager):
                 extension.obj.delete_router(context, data)
             else:
                 LOG.error(
-                    _LE("Agent Extension '%(name)s' does not "
-                        "implement method delete_router"),
+                    "Agent Extension '%(name)s' does not "
+                    "implement method delete_router",
                     {'name': extension.name}
                 )
@ -12,7 +12,6 @@
|
|||||||
|
|
||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from neutron._i18n import _LE
|
|
||||||
from neutron.agent.l3 import dvr_fip_ns
|
from neutron.agent.l3 import dvr_fip_ns
|
||||||
from neutron.agent.l3 import dvr_snat_ns
|
from neutron.agent.l3 import dvr_snat_ns
|
||||||
from neutron.agent.l3 import namespaces
|
from neutron.agent.l3 import namespaces
|
||||||
@ -119,8 +118,8 @@ class NamespaceManager(object):
|
|||||||
namespaces = root_ip.get_namespaces()
|
namespaces = root_ip.get_namespaces()
|
||||||
return set(ns for ns in namespaces if self.is_managed(ns))
|
return set(ns for ns in namespaces if self.is_managed(ns))
|
||||||
except RuntimeError:
|
except RuntimeError:
|
||||||
LOG.exception(_LE('RuntimeError in obtaining namespace list for '
|
LOG.exception('RuntimeError in obtaining namespace list for '
|
||||||
'namespace cleanup.'))
|
'namespace cleanup.')
|
||||||
return set()
|
return set()
|
||||||
|
|
||||||
def ensure_router_cleanup(self, router_id):
|
def ensure_router_cleanup(self, router_id):
|
||||||
@ -144,4 +143,4 @@ class NamespaceManager(object):
|
|||||||
self.process_monitor, ns_id, self.agent_conf, ns.name)
|
self.process_monitor, ns_id, self.agent_conf, ns.name)
|
||||||
ns.delete()
|
ns.delete()
|
||||||
except RuntimeError:
|
except RuntimeError:
|
||||||
LOG.exception(_LE('Failed to destroy stale namespace %s'), ns)
|
LOG.exception('Failed to destroy stale namespace %s', ns)
|
||||||
|
@ -18,7 +18,6 @@ import functools
|
|||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
from oslo_utils import excutils
|
from oslo_utils import excutils
|
||||||
|
|
||||||
from neutron._i18n import _LE, _LW
|
|
||||||
from neutron.agent.linux import ip_lib
|
from neutron.agent.linux import ip_lib
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@ -65,8 +64,8 @@ def check_ns_existence(f):
|
|||||||
@functools.wraps(f)
|
@functools.wraps(f)
|
||||||
def wrapped(self, *args, **kwargs):
|
def wrapped(self, *args, **kwargs):
|
||||||
if not self.exists():
|
if not self.exists():
|
||||||
LOG.warning(_LW('Namespace %(name)s does not exist. Skipping '
|
LOG.warning('Namespace %(name)s does not exist. Skipping '
|
||||||
'%(func)s'),
|
'%(func)s',
|
||||||
{'name': self.name, 'func': f.__name__})
|
{'name': self.name, 'func': f.__name__})
|
||||||
return
|
return
|
||||||
try:
|
try:
|
||||||
@ -111,7 +110,7 @@ class Namespace(object):
|
|||||||
try:
|
try:
|
||||||
self.ip_wrapper_root.netns.delete(self.name)
|
self.ip_wrapper_root.netns.delete(self.name)
|
||||||
except RuntimeError:
|
except RuntimeError:
|
||||||
msg = _LE('Failed trying to delete namespace: %s')
|
msg = 'Failed trying to delete namespace: %s'
|
||||||
LOG.exception(msg, self.name)
|
LOG.exception(msg, self.name)
|
||||||
|
|
||||||
def exists(self):
|
def exists(self):
|
||||||
|
@ -19,7 +19,7 @@ from neutron_lib import constants as lib_constants
|
|||||||
from neutron_lib.utils import helpers
|
from neutron_lib.utils import helpers
|
||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from neutron._i18n import _, _LE, _LW
|
from neutron._i18n import _
|
||||||
from neutron.agent.l3 import namespaces
|
from neutron.agent.l3 import namespaces
|
||||||
from neutron.agent.linux import ip_lib
|
from neutron.agent.linux import ip_lib
|
||||||
from neutron.agent.linux import iptables_manager
|
from neutron.agent.linux import iptables_manager
|
||||||
@ -298,8 +298,8 @@ class RouterInfo(object):
|
|||||||
except RuntimeError:
|
except RuntimeError:
|
||||||
# any exception occurred here should cause the floating IP
|
# any exception occurred here should cause the floating IP
|
||||||
# to be set in error state
|
# to be set in error state
|
||||||
LOG.warning(_LW("Unable to configure IP address for "
|
LOG.warning("Unable to configure IP address for "
|
||||||
"floating IP: %s"), fip['id'])
|
"floating IP: %s", fip['id'])
|
||||||
|
|
||||||
def add_floating_ip(self, fip, interface_name, device):
|
def add_floating_ip(self, fip, interface_name, device):
|
||||||
raise NotImplementedError()
|
raise NotImplementedError()
|
||||||
@ -882,7 +882,7 @@ class RouterInfo(object):
|
|||||||
|
|
||||||
except n_exc.FloatingIpSetupException:
|
except n_exc.FloatingIpSetupException:
|
||||||
# All floating IPs must be put in error state
|
# All floating IPs must be put in error state
|
||||||
LOG.exception(_LE("Failed to process floating IPs."))
|
LOG.exception("Failed to process floating IPs.")
|
||||||
fip_statuses = self.put_fips_in_error_state()
|
fip_statuses = self.put_fips_in_error_state()
|
||||||
finally:
|
finally:
|
||||||
self.update_fip_statuses(fip_statuses)
|
self.update_fip_statuses(fip_statuses)
|
||||||
@ -908,7 +908,7 @@ class RouterInfo(object):
|
|||||||
except (n_exc.FloatingIpSetupException,
|
except (n_exc.FloatingIpSetupException,
|
||||||
n_exc.IpTablesApplyException):
|
n_exc.IpTablesApplyException):
|
||||||
# All floating IPs must be put in error state
|
# All floating IPs must be put in error state
|
||||||
LOG.exception(_LE("Failed to process floating IPs."))
|
LOG.exception("Failed to process floating IPs.")
|
||||||
fip_statuses = self.put_fips_in_error_state()
|
fip_statuses = self.put_fips_in_error_state()
|
||||||
finally:
|
finally:
|
||||||
self.update_fip_statuses(fip_statuses)
|
self.update_fip_statuses(fip_statuses)
|
||||||
@ -1102,8 +1102,8 @@ class RouterInfo(object):
|
|||||||
self.agent.pd.sync_router(self.router['id'])
|
self.agent.pd.sync_router(self.router['id'])
|
||||||
self._process_external_on_delete()
|
self._process_external_on_delete()
|
||||||
else:
|
else:
|
||||||
LOG.warning(_LW("Can't gracefully delete the router %s: "
|
LOG.warning("Can't gracefully delete the router %s: "
|
||||||
"no router namespace found."), self.router['id'])
|
"no router namespace found.", self.router['id'])
|
||||||
|
|
||||||
@common_utils.exception_logger()
|
@common_utils.exception_logger()
|
||||||
def process(self):
|
def process(self):
|
||||||
|
@ -20,7 +20,7 @@ import eventlet.queue
|
|||||||
from neutron_lib.utils import helpers
|
from neutron_lib.utils import helpers
|
||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from neutron._i18n import _, _LE
|
from neutron._i18n import _
|
||||||
from neutron.agent.linux import ip_lib
|
from neutron.agent.linux import ip_lib
|
||||||
from neutron.agent.linux import utils
|
from neutron.agent.linux import utils
|
||||||
from neutron.common import utils as common_utils
|
from neutron.common import utils as common_utils
|
||||||
@ -182,7 +182,7 @@ class AsyncProcess(object):
|
|||||||
# root and need to be killed via the same helper.
|
# root and need to be killed via the same helper.
|
||||||
utils.kill_process(pid, kill_signal, self.run_as_root)
|
utils.kill_process(pid, kill_signal, self.run_as_root)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE('An error occurred while killing [%s].'),
|
LOG.exception('An error occurred while killing [%s].',
|
||||||
self.cmd)
|
self.cmd)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
@ -211,8 +211,8 @@ class AsyncProcess(object):
|
|||||||
if not output and output != "":
|
if not output and output != "":
|
||||||
break
|
break
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_LE('An error occurred while communicating '
|
LOG.exception('An error occurred while communicating '
|
||||||
'with async process [%s].'), self.cmd)
|
'with async process [%s].', self.cmd)
|
||||||
break
|
break
|
||||||
# Ensure that watching a process with lots of output does
|
# Ensure that watching a process with lots of output does
|
||||||
# not block execution of other greenthreads.
|
# not block execution of other greenthreads.
|
||||||
@ -242,11 +242,11 @@ class AsyncProcess(object):
|
|||||||
def _read_stderr(self):
|
def _read_stderr(self):
|
||||||
data = self._read(self._process.stderr, self._stderr_lines)
|
data = self._read(self._process.stderr, self._stderr_lines)
|
||||||
if self.log_output:
|
if self.log_output:
|
||||||
LOG.error(_LE('Error received from [%(cmd)s]: %(err)s'),
|
LOG.error('Error received from [%(cmd)s]: %(err)s',
|
||||||
{'cmd': self.cmd,
|
{'cmd': self.cmd,
|
||||||
'err': data})
|
'err': data})
|
||||||
if self.die_on_error:
|
if self.die_on_error:
|
||||||
LOG.error(_LE("Process [%(cmd)s] dies due to the error: %(err)s"),
|
LOG.error("Process [%(cmd)s] dies due to the error: %(err)s",
|
||||||
{'cmd': self.cmd,
|
{'cmd': self.cmd,
|
||||||
'err': data})
|
'err': data})
|
||||||
# the callback caller will use None to indicate the need to bail
|
# the callback caller will use None to indicate the need to bail
|
||||||
|
@ -25,7 +25,7 @@ import sys
|
|||||||
from debtcollector import removals
|
from debtcollector import removals
|
||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from neutron._i18n import _, _LE, _LI
|
from neutron._i18n import _
|
||||||
from neutron.common import exceptions
|
from neutron.common import exceptions
|
||||||
|
|
||||||
LOG = logging.getLogger(__name__)
|
LOG = logging.getLogger(__name__)
|
||||||
@ -113,7 +113,7 @@ def drop_privileges(user=None, group=None):
|
|||||||
if user is not None:
|
if user is not None:
|
||||||
setuid(user)
|
setuid(user)
|
||||||
|
|
||||||
LOG.info(_LI("Process runs with uid/gid: %(uid)s/%(gid)s"),
|
LOG.info("Process runs with uid/gid: %(uid)s/%(gid)s",
|
||||||
{'uid': os.getuid(), 'gid': os.getgid()})
|
{'uid': os.getuid(), 'gid': os.getgid()})
|
||||||
|
|
||||||
|
|
||||||
@ -126,7 +126,7 @@ class Pidfile(object):
|
|||||||
self.fd = os.open(pidfile, os.O_CREAT | os.O_RDWR)
|
self.fd = os.open(pidfile, os.O_CREAT | os.O_RDWR)
|
||||||
fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
|
fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
|
||||||
except IOError:
|
except IOError:
|
||||||
LOG.exception(_LE("Error while handling pidfile: %s"), pidfile)
|
LOG.exception("Error while handling pidfile: %s", pidfile)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
@ -191,7 +191,7 @@ class Daemon(object):
|
|||||||
if pid > 0:
|
if pid > 0:
|
||||||
os._exit(0)
|
os._exit(0)
|
||||||
except OSError:
|
except OSError:
|
||||||
LOG.exception(_LE('Fork failed'))
|
LOG.exception('Fork failed')
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
def daemonize(self):
|
def daemonize(self):
|
||||||
@ -244,8 +244,8 @@ class Daemon(object):
|
|||||||
|
|
||||||
if self.pidfile is not None and self.pidfile.is_running():
|
if self.pidfile is not None and self.pidfile.is_running():
|
||||||
self.pidfile.unlock()
|
self.pidfile.unlock()
|
||||||
LOG.error(_LE('Pidfile %s already exist. Daemon already '
|
LOG.error('Pidfile %s already exist. Daemon already '
|
||||||
'running?'), self.pidfile)
|
'running?', self.pidfile)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
# Start the daemon
|
# Start the daemon
|
||||||
|
@ -32,7 +32,7 @@ from oslo_utils import fileutils
from oslo_utils import uuidutils
import six

-from neutron._i18n import _, _LI, _LW, _LE
+from neutron._i18n import _
from neutron.agent.common import utils as agent_common_utils
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib

@ -238,7 +238,7 @@ class DhcpLocalProcess(DhcpBase):
try:
self.device_manager.destroy(self.network, self.interface_name)
except RuntimeError:
-LOG.warning(_LW('Failed trying to delete interface: %s'),
+LOG.warning('Failed trying to delete interface: %s',
self.interface_name)

ns_ip = ip_lib.IPWrapper(namespace=self.network.namespace)

@ -248,7 +248,7 @@
try:
ns_ip.netns.delete(self.network.namespace)
except RuntimeError:
-LOG.warning(_LW('Failed trying to delete namespace: %s'),
+LOG.warning('Failed trying to delete namespace: %s',
self.network.namespace)

def _get_value_from_conf_file(self, kind, converter=None):

@ -421,8 +421,7 @@ class Dnsmasq(DhcpLocalProcess):
if not os.path.exists(log_dir):
os.makedirs(log_dir)
except OSError:
-LOG.error(_LE('Error while create dnsmasq log dir: %s'),
-log_dir)
+LOG.error('Error while create dnsmasq log dir: %s', log_dir)
else:
log_filename = os.path.join(log_dir, 'dhcp_dns_log')
cmd.append('--log-queries')

@ -460,8 +459,8 @@
if self._IS_DHCP_RELEASE6_SUPPORTED is None:
self._IS_DHCP_RELEASE6_SUPPORTED = checks.dhcp_release6_supported()
if not self._IS_DHCP_RELEASE6_SUPPORTED:
-LOG.warning(_LW("dhcp_release6 is not present on this system, "
-"will not call it again."))
+LOG.warning("dhcp_release6 is not present on this system, "
+"will not call it again.")
return self._IS_DHCP_RELEASE6_SUPPORTED

def _release_lease(self, mac_address, ip, client_id=None,

@ -483,8 +482,8 @@
except RuntimeError as e:
# when failed to release single lease there's
# no need to propagate error further
-LOG.warning(_LW('DHCP release failed for %(cmd)s. '
-'Reason: %(e)s'), {'cmd': cmd, 'e': e})
+LOG.warning('DHCP release failed for %(cmd)s. '
+'Reason: %(e)s', {'cmd': cmd, 'e': e})

def _output_config_files(self):
self._output_hosts_file()

@ -798,9 +797,9 @@
server_id = l.strip().split()[1]
continue
else:
-LOG.warning(_LW('Multiple DUID entries in %s '
+LOG.warning('Multiple DUID entries in %s '
'lease file, dnsmasq is possibly '
-'not functioning properly'),
+'not functioning properly',
filename)
continue
parts = l.strip().split()

@ -969,9 +968,9 @@
self._format_option(opt_ip_version, port.id,
opt.opt_name, opt.opt_value))
else:
-LOG.info(_LI("Cannot apply dhcp option %(opt)s "
+LOG.info("Cannot apply dhcp option %(opt)s "
"because it's ip_version %(version)d "
-"is not in port's address IP versions"),
+"is not in port's address IP versions",
{'opt': opt.opt_name,
'version': opt_ip_version})

@ -1269,8 +1268,8 @@ class DeviceManager(object):
'device_id': device_id}})
except oslo_messaging.RemoteError as e:
if e.exc_type == 'DhcpPortInUse':
-LOG.info(_LI("Skipping DHCP port %s as it is "
-"already in use"), port.id)
+LOG.info("Skipping DHCP port %s as it is "
+"already in use", port.id)
continue
raise
if port:

@ -1374,8 +1373,8 @@
try:
self.unplug(d.name, network)
except Exception:
-LOG.exception(_LE("Exception during stale "
-"dhcp device cleanup"))
+LOG.exception("Exception during stale "
+"dhcp device cleanup")

def plug(self, network, port, interface_name):
"""Plug device settings for the network's DHCP on this host."""

@ -1424,8 +1423,8 @@
self.plug(network, port, interface_name)
except Exception:
with excutils.save_and_reraise_exception():
-LOG.exception(_LE('Unable to plug DHCP port for '
-'network %s. Releasing port.'),
+LOG.exception('Unable to plug DHCP port for '
+'network %s. Releasing port.',
network.id)
self.plugin.release_dhcp_port(network.id, port.device_id)
@ -24,7 +24,7 @@ from oslo_utils import fileutils
import psutil
import six

-from neutron._i18n import _, _LW, _LE
+from neutron._i18n import _
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils

@ -237,9 +237,9 @@ class ProcessMonitor(object):
pm = self._monitored_processes.get(service_id)

if pm and not pm.active:
-LOG.error(_LE("%(service)s for %(resource_type)s "
+LOG.error("%(service)s for %(resource_type)s "
"with uuid %(uuid)s not found. "
-"The process should not have died"),
+"The process should not have died",
{'service': service_id.service,
'resource_type': self._resource_type,
'uuid': service_id.uuid})

@ -257,14 +257,14 @@
action_function(service_id)

def _respawn_action(self, service_id):
-LOG.warning(_LW("Respawning %(service)s for uuid %(uuid)s"),
+LOG.warning("Respawning %(service)s for uuid %(uuid)s",
{'service': service_id.service,
'uuid': service_id.uuid})
self._monitored_processes[service_id].enable()

def _exit_action(self, service_id):
-LOG.error(_LE("Exiting agent as programmed in check_child_processes_"
-"actions"))
+LOG.error("Exiting agent as programmed in check_child_processes_"
+"actions")
self._exit_handler(service_id.uuid, service_id.service)

def _exit_handler(self, uuid, service):

@ -274,7 +274,7 @@
check_child_processes_actions, and one of our external processes die
unexpectedly.
"""
-LOG.error(_LE("Exiting agent because of a malfunction with the "
-"%(service)s process identified by uuid %(uuid)s"),
+LOG.error("Exiting agent because of a malfunction with the "
+"%(service)s process identified by uuid %(uuid)s",
{'service': service, 'uuid': uuid})
raise SystemExit(1)

@ -22,7 +22,7 @@ from oslo_config import cfg
from oslo_log import log as logging
import six

-from neutron._i18n import _, _LE, _LI, _LW
+from neutron._i18n import _
from neutron.agent.common import ovs_lib
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils

@ -265,12 +265,12 @@ class LinuxInterfaceDriver(object):
self.plug_new(network_id, port_id, device_name, mac_address,
bridge, namespace, prefix, mtu)
else:
-LOG.info(_LI("Device %s already exists"), device_name)
+LOG.info("Device %s already exists", device_name)
if mtu:
self.set_mtu(
device_name, mtu, namespace=namespace, prefix=prefix)
else:
-LOG.warning(_LW("No MTU configured for port %s"), port_id)
+LOG.warning("No MTU configured for port %s", port_id)

@abc.abstractmethod
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):

@ -296,7 +296,7 @@
def set_mtu(self, device_name, mtu, namespace=None, prefix=None):
"""Set MTU on the interface."""
if not self._mtu_update_warn_logged:
-LOG.warning(_LW("Interface driver cannot update MTU for ports"))
+LOG.warning("Interface driver cannot update MTU for ports")
self._mtu_update_warn_logged = True

@ -367,7 +367,7 @@ class OVSInterfaceDriver(LinuxInterfaceDriver):
ns_dev.link.set_address(mac_address)
break
except RuntimeError as e:
-LOG.warning(_LW("Got error trying to set mac, retrying: %s"),
+LOG.warning("Got error trying to set mac, retrying: %s",
str(e))
time.sleep(1)
else:

@ -386,7 +386,7 @@
if mtu:
self.set_mtu(device_name, mtu, namespace=namespace, prefix=prefix)
else:
-LOG.warning(_LW("No MTU configured for port %s"), port_id)
+LOG.warning("No MTU configured for port %s", port_id)

ns_dev.link.set_up()
if self.conf.ovs_use_veth:

@ -408,7 +408,7 @@
device.link.delete()
LOG.debug("Unplugged interface '%s'", device_name)
except RuntimeError:
-LOG.error(_LE("Failed unplugging interface '%s'"),
+LOG.error("Failed unplugging interface '%s'",
device_name)

def set_mtu(self, device_name, mtu, namespace=None, prefix=None):

@ -458,7 +458,7 @@ class IVSInterfaceDriver(LinuxInterfaceDriver):
ns_dev.link.set_mtu(mtu)
root_dev.link.set_mtu(mtu)
else:
-LOG.warning(_LW("No MTU configured for port %s"), port_id)
+LOG.warning("No MTU configured for port %s", port_id)

if namespace:
namespace_obj = ip.ensure_namespace(namespace)

@ -477,7 +477,7 @@
device.link.delete()
LOG.debug("Unplugged interface '%s'", device_name)
except RuntimeError:
-LOG.error(_LE("Failed unplugging interface '%s'"),
+LOG.error("Failed unplugging interface '%s'",
device_name)

@ -503,7 +503,7 @@ class BridgeInterfaceDriver(LinuxInterfaceDriver):
if mtu:
self.set_mtu(device_name, mtu, namespace=namespace, prefix=prefix)
else:
-LOG.warning(_LW("No MTU configured for port %s"), port_id)
+LOG.warning("No MTU configured for port %s", port_id)

root_veth.link.set_up()
ns_veth.link.set_up()

@ -515,7 +515,7 @@
device.link.delete()
LOG.debug("Unplugged interface '%s'", device_name)
except RuntimeError:
-LOG.error(_LE("Failed unplugging interface '%s'"),
+LOG.error("Failed unplugging interface '%s'",
device_name)

def set_mtu(self, device_name, mtu, namespace=None, prefix=None):

@ -17,7 +17,6 @@ import netaddr
from oslo_concurrency import lockutils
from oslo_log import log as logging

-from neutron._i18n import _LE
from neutron.agent.linux import utils as linux_utils
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc

@ -107,8 +106,8 @@ class IpConntrackManager(object):
check_exit_code=True,
extra_ok_codes=[1])
except RuntimeError:
-LOG.exception(
-_LE("Failed execute conntrack command %s"), cmd)
+LOG.exception("Failed execute conntrack command %s", cmd)

def delete_conntrack_state_by_rule(self, device_info_list, rule):
self._delete_conntrack_state(device_info_list, rule)
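One shape change worth noting in the `IpConntrackManager` hunk above: with the wrapper gone, a two-line `LOG.exception(_LE(...), cmd)` call collapses to a single line. `LOG.exception` needs no decoration on the message at all, since it is meant to be called from an exception handler and appends the active traceback on its own. A hedged sketch using the stdlib logger (oslo.log exposes the same call signature; the command list is a stand-in value):

```python
import logging

logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)

cmd = ['conntrack', '-D', '-p', 'udp']  # stand-in for the real command list

try:
    raise RuntimeError("simulated conntrack failure")
except RuntimeError:
    # Logs at ERROR level and appends the current traceback automatically,
    # so a plain format string plus lazy arguments is all that is needed.
    LOG.exception("Failed execute conntrack command %s", cmd)
```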
@ -27,7 +27,7 @@ from oslo_log import log as logging
from oslo_utils import excutils
import six

-from neutron._i18n import _, _LE, _LW
+from neutron._i18n import _
from neutron.agent.common import utils
from neutron.common import exceptions as n_exc
from neutron.common import utils as common_utils

@ -326,8 +326,8 @@ class IPDevice(SubProcessBase):
extra_ok_codes=[1])

except RuntimeError:
-LOG.exception(_LE("Failed deleting ingress connection state of"
-" floatingip %s"), ip_str)
+LOG.exception("Failed deleting ingress connection state of"
+" floatingip %s", ip_str)

# Delete conntrack state for egress traffic
try:

@ -335,8 +335,8 @@
check_exit_code=True,
extra_ok_codes=[1])
except RuntimeError:
-LOG.exception(_LE("Failed deleting egress connection state of"
-" floatingip %s"), ip_str)
+LOG.exception("Failed deleting egress connection state of"
+" floatingip %s", ip_str)

def disable_ipv6(self):
sysctl_name = re.sub(r'\.', '/', self.name)

@ -1100,8 +1100,8 @@ def _arping(ns_name, iface_name, address, count, log_exception):
'ns': ns_name,
'err': exc})
if not exists:
-LOG.warning(_LW("Interface %s might have been deleted "
-"concurrently"), iface_name)
+LOG.warning("Interface %s might have been deleted "
+"concurrently", iface_name)
return

@ -1160,7 +1160,7 @@ def sysctl(cmd, namespace=None, log_fail_as_error=True):
log_fail_as_error=log_fail_as_error)
except RuntimeError as rte:
LOG.warning(
-_LW("Setting %(cmd)s in namespace %(ns)s failed: %(err)s."),
+"Setting %(cmd)s in namespace %(ns)s failed: %(err)s.",
{'cmd': cmd,
'ns': namespace,
'err': rte})

@ -1207,9 +1207,9 @@ def set_ip_nonlocal_bind_for_namespace(namespace):
log_fail_as_error=False)
if failed:
LOG.warning(
-_LW("%s will not be set to 0 in the root namespace in order to "
+"%s will not be set to 0 in the root namespace in order to "
"not break DVR, which requires this value be set to 1. This "
"may introduce a race between moving a floating IP to a "
"different network node, and the peer side getting a "
-"populated ARP cache for a given floating IP address."),
+"populated ARP cache for a given floating IP address.",
IP_NONLOCAL_BIND)

@ -18,7 +18,7 @@ import re
from neutron_lib import exceptions as n_exc
from oslo_log import log as logging

-from neutron._i18n import _, _LE
+from neutron._i18n import _
from neutron.agent.linux import utils

@ -103,6 +103,6 @@ class IpLinkSupport(object):
return_stderr=True,
log_fail_as_error=False)
except Exception as e:
-LOG.exception(_LE("Failed executing ip command"))
+LOG.exception("Failed executing ip command")
raise UnsupportedIpLinkCommand(reason=e)
return _stdout or _stderr

@ -16,7 +16,6 @@
from oslo_log import log as logging
from oslo_utils import excutils

-from neutron._i18n import _LE
from neutron.agent.linux import async_process
from neutron.agent.linux import ip_lib

@ -41,7 +40,7 @@ class IPMonitorEvent(object):
first_word = route[0]
except IndexError:
with excutils.save_and_reraise_exception():
-LOG.error(_LE('Unable to parse route "%s"'), line)
+LOG.error('Unable to parse route "%s"', line)

added = (first_word != 'Deleted')
if not added:

@ -52,7 +51,7 @@
cidr = route[3]
except IndexError:
with excutils.save_and_reraise_exception():
-LOG.error(_LE('Unable to parse route "%s"'), line)
+LOG.error('Unable to parse route "%s"', line)

return cls(line, added, interface, cidr)

@ -21,7 +21,6 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import netutils

-from neutron._i18n import _LI
from neutron.agent import firewall
from neutron.agent.linux import ip_conntrack
from neutron.agent.linux import ipset_manager

@ -160,8 +159,8 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
def update_port_filter(self, port):
LOG.debug("Updating device (%s) filter", port['device'])
if port['device'] not in self.ports:
-LOG.info(_LI('Attempted to update port filter which is not '
-'filtered %s'), port['device'])
+LOG.info('Attempted to update port filter which is not '
+'filtered %s', port['device'])
return
self._remove_chains()
self._set_ports(port)

@ -171,8 +170,8 @@
def remove_port_filter(self, port):
LOG.debug("Removing device (%s) filter", port['device'])
if port['device'] not in self.ports:
-LOG.info(_LI('Attempted to remove port filter which is not '
-'filtered %r'), port)
+LOG.info('Attempted to remove port filter which is not '
+'filtered %r', port)
return
self._remove_chains()
self._remove_conntrack_entries_from_port_deleted(port)

@ -30,7 +30,7 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils

-from neutron._i18n import _, _LE, _LW
+from neutron._i18n import _
from neutron.agent.linux import ip_lib
from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import utils as linux_utils

@ -247,8 +247,8 @@ class IptablesTable(object):
top, self.wrap_name,
comment=comment)))
except ValueError:
-LOG.warning(_LW('Tried to remove rule that was not there:'
-' %(chain)r %(rule)r %(wrap)r %(top)r'),
+LOG.warning('Tried to remove rule that was not there:'
+' %(chain)r %(rule)r %(wrap)r %(top)r',
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})

@ -533,8 +533,8 @@ class IptablesManager(object):
commands[log_start:log_end],
log_start + 1)
)
-LOG.error(_LE("IPTablesManager.apply failed to apply the "
-"following set of iptables rules:\n%s"),
+LOG.error("IPTablesManager.apply failed to apply the "
+"following set of iptables rules:\n%s",
'\n'.join(log_lines))
LOG.debug("IPTablesManager.apply completed with success. %d iptables "
"commands were issued", len(all_commands))

@ -636,9 +636,9 @@
def _weed_out_duplicates(line):
if line in seen_lines:
thing = 'chain' if line.startswith(':') else 'rule'
-LOG.warning(_LW("Duplicate iptables %(thing)s detected. This "
+LOG.warning("Duplicate iptables %(thing)s detected. This "
"may indicate a bug in the iptables "
-"%(thing)s generation code. Line: %(line)s"),
+"%(thing)s generation code. Line: %(line)s",
{'thing': thing, 'line': line})
return False
seen_lines.add(line)

@ -675,8 +675,8 @@
"""Return the sum of the traffic counters of all rules of a chain."""
cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap)
if not cmd_tables:
-LOG.warning(_LW('Attempted to get traffic counters of chain %s '
-'which does not exist'), chain)
+LOG.warning('Attempted to get traffic counters of chain %s '
+'which does not exist', chain)
return

name = get_chain_name(chain, wrap)

@ -23,7 +23,7 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils

-from neutron._i18n import _, _LE
+from neutron._i18n import _
from neutron.agent.linux import external_process
from neutron.common import constants
from neutron.common import utils

@ -406,8 +406,8 @@ class KeepalivedManager(object):
os.remove(pid_file)
except OSError as e:
if e.errno != errno.ENOENT:
-LOG.error(_LE("Could not delete file %s, keepalived can "
-"refuse to start."), pid_file)
+LOG.error("Could not delete file %s, keepalived can "
+"refuse to start.", pid_file)

def get_vrrp_pid_file_name(self, base_pid_file):
return '%s-vrrp' % base_pid_file

@ -20,7 +20,6 @@ from neutron_lib import constants as lib_const
from oslo_log import log as logging
from oslo_utils import netutils

-from neutron._i18n import _LE
from neutron.agent import firewall
from neutron.agent.linux.openvswitch_firewall import constants as ovsfw_consts
from neutron.agent.linux.openvswitch_firewall import exceptions

@ -463,8 +462,8 @@ class OVSFirewallDriver(firewall.FirewallDriver):
# allow_address_pair MACs will be updated in
# self.get_or_create_ofport(port)
if old_of_port:
-LOG.error(_LE("Initializing port %s that was already "
-"initialized."),
+LOG.error("Initializing port %s that was already "
+"initialized.",
port['device'])
self.delete_all_port_flows(old_of_port)
of_port = self.get_or_create_ofport(port)

@ -15,7 +15,6 @@
from oslo_log import log as logging
from oslo_serialization import jsonutils

-from neutron._i18n import _LE
from neutron.agent.linux import async_process
from neutron.agent.ovsdb import api as ovsdb
from neutron.agent.ovsdb.native import helpers

@ -81,7 +80,7 @@ class SimpleInterfaceMonitor(OvsdbMonitor):
temporary if respawn_interval is set.
"""
if not self.is_active():
-LOG.error(_LE("Interface monitor is not active"))
+LOG.error("Interface monitor is not active")
else:
self.process_events()
return bool(self.new_events['added'] or self.new_events['removed'])

@ -27,7 +27,7 @@ from oslo_utils import netutils
import six
from stevedore import driver

-from neutron._i18n import _, _LE
+from neutron._i18n import _
from neutron.common import constants as l3_constants
from neutron.common import utils

@ -392,8 +392,8 @@ def update_router(resource, event, l3_agent, **kwargs):
updated_router = kwargs['router']
router = l3_agent.pd.routers.get(updated_router.router_id)
if not router:
-LOG.exception(_LE("Router to be updated is not in internal routers "
-"list: %s"), updated_router.router_id)
+LOG.exception("Router to be updated is not in internal routers "
+"list: %s", updated_router.router_id)
else:
router['ns_name'] = updated_router.get_gw_ns_name()

@ -33,7 +33,7 @@ from oslo_utils import excutils
from oslo_utils import fileutils
from six.moves import http_client as httplib

-from neutron._i18n import _, _LE
+from neutron._i18n import _
from neutron.agent.linux import xenapi_root_helper
from neutron.common import utils
from neutron.conf.agent import common as config

@ -111,7 +111,7 @@ def execute_rootwrap_daemon(cmd, process_input, addl_env):
return client.execute(cmd, process_input)
except Exception:
with excutils.save_and_reraise_exception():
-LOG.error(_LE("Rootwrap error running command: %s"), cmd)
+LOG.error("Rootwrap error running command: %s", cmd)

def execute(cmd, process_input=None, addl_env=None,

@ -249,7 +249,7 @@ def get_value_from_file(filename, converter=None):
try:
return converter(f.read()) if converter else f.read()
except ValueError:
-LOG.error(_LE('Unable to convert value in %s'), filename)
+LOG.error('Unable to convert value in %s', filename)
except IOError:
LOG.debug('Unable to access %s', filename)

@ -29,7 +29,6 @@ from oslo_log import log as logging
from oslo_rootwrap import cmd as oslo_rootwrap_cmd
from oslo_serialization import jsonutils

-from neutron._i18n import _LE
from neutron.conf.agent import xenapi_conf

@ -88,6 +87,6 @@ class XenAPIClient(object):
err = result['err']
return returncode, out, err
except XenAPI.Failure as failure:
-LOG.exception(_LE('Failed to execute command: %s'), cmd)
+LOG.exception('Failed to execute command: %s', cmd)
returncode = self._get_return_code(failure.details)
return returncode, out, err

@ -27,7 +27,7 @@ import six
import six.moves.urllib.parse as urlparse
import webob

-from neutron._i18n import _, _LE, _LW
+from neutron._i18n import _
from neutron.agent.linux import utils as agent_utils
from neutron.agent import rpc as agent_rpc
from neutron.common import cache_utils as cache

@ -92,7 +92,7 @@ class MetadataProxyHandler(object):
return webob.exc.HTTPNotFound()

except Exception:
-LOG.exception(_LE("Unexpected error."))
+LOG.exception("Unexpected error.")
msg = _('An unknown error has occurred. '
'Please try your request again.')
explanation = six.text_type(msg)

@ -198,10 +198,10 @@
LOG.debug(str(resp))
return req.response
elif resp.status == 403:
-LOG.warning(_LW(
+LOG.warning(
'The remote metadata server responded with Forbidden. This '
'response usually occurs when shared secrets do not match.'
-))
+)
return webob.exc.HTTPForbidden()
elif resp.status == 400:
return webob.exc.HTTPBadRequest()

@ -262,12 +262,12 @@ class UnixDomainMetadataProxy(object):
use_call=self.agent_state.get('start_flag'))
except AttributeError:
# This means the server does not support report_state
-LOG.warning(_LW('Neutron server does not support state report.'
-' State report for this agent will be disabled.'))
+LOG.warning('Neutron server does not support state report.'
+' State report for this agent will be disabled.')
self.heartbeat.stop()
return
except Exception:
-LOG.exception(_LE("Failed reporting state!"))
+LOG.exception("Failed reporting state!")
return
self.agent_state.pop('start_flag', None)
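The metadata-agent hunks above make the boundary of the cleanup visible: `_LE("Unexpected error.")` loses its marker because the text only reaches the log, while the `_('An unknown error has occurred. ...')` message keeps `_()` because it is sent back to the API caller and should still be translated. A sketch of that split (the function name is an illustrative assumption):

```python
import logging

from neutron._i18n import _  # user-facing strings keep the translation hook

LOG = logging.getLogger(__name__)


def explain_unknown_error(do_work):
    try:
        return do_work()
    except Exception:
        # Operator-facing: logged in English, no translation marker.
        LOG.exception("Unexpected error.")
        # Caller-facing: embedded in the HTTP response, so still wrapped.
        return _('An unknown error has occurred. '
                 'Please try your request again.')
```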
@ -22,7 +22,6 @@ from oslo_utils import excutils
|
|||||||
from oslo_utils import uuidutils
|
from oslo_utils import uuidutils
|
||||||
import six
|
import six
|
||||||
|
|
||||||
from neutron._i18n import _LE
|
|
||||||
from neutron.agent.common import utils
|
from neutron.agent.common import utils
|
||||||
from neutron.agent.ovsdb import api as ovsdb
|
from neutron.agent.ovsdb import api as ovsdb
|
||||||
|
|
||||||
@ -70,8 +69,8 @@ class Transaction(ovsdb.Transaction):
|
|||||||
except Exception as e:
|
except Exception as e:
|
||||||
with excutils.save_and_reraise_exception() as ctxt:
|
with excutils.save_and_reraise_exception() as ctxt:
|
||||||
if self.log_errors:
|
if self.log_errors:
|
||||||
LOG.error(_LE("Unable to execute %(cmd)s. "
|
LOG.error("Unable to execute %(cmd)s. "
|
||||||
"Exception: %(exception)s"),
|
"Exception: %(exception)s",
|
||||||
{'cmd': full_args, 'exception': e})
|
{'cmd': full_args, 'exception': e})
|
||||||
if not self.check_error:
|
if not self.check_error:
|
||||||
ctxt.reraise = False
|
ctxt.reraise = False
|
||||||
@ -130,8 +129,8 @@ class DbCommand(BaseCommand):
|
|||||||
# This shouldn't happen, but if it does and we check_errors
|
# This shouldn't happen, but if it does and we check_errors
|
||||||
# log and raise.
|
# log and raise.
|
||||||
with excutils.save_and_reraise_exception():
|
with excutils.save_and_reraise_exception():
|
||||||
LOG.error(_LE("Could not parse: %(raw_result)s. "
|
LOG.error("Could not parse: %(raw_result)s. "
|
||||||
"Exception: %(exception)s"),
|
"Exception: %(exception)s",
|
||||||
{'raw_result': raw_result, 'exception': e})
|
{'raw_result': raw_result, 'exception': e})
|
||||||
|
|
||||||
headings = json['headings']
|
headings = json['headings']
|
||||||
|
@ -21,7 +21,6 @@ from oslo_config import cfg
|
|||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
import oslo_messaging
|
import oslo_messaging
|
||||||
|
|
||||||
from neutron._i18n import _LI, _LW
|
|
||||||
from neutron.agent import firewall
|
from neutron.agent import firewall
|
||||||
from neutron.api.rpc.handlers import securitygroups_rpc
|
from neutron.api.rpc.handlers import securitygroups_rpc
|
||||||
from neutron.conf.agent import securitygroups_rpc as sc_cfg
|
from neutron.conf.agent import securitygroups_rpc as sc_cfg
|
||||||
@ -44,9 +43,9 @@ def _disable_extension(extension, aliases):
|
|||||||
|
|
||||||
def disable_security_group_extension_by_config(aliases):
|
def disable_security_group_extension_by_config(aliases):
|
||||||
if not is_firewall_enabled():
|
if not is_firewall_enabled():
|
||||||
LOG.info(_LI('Disabled security-group extension.'))
|
LOG.info('Disabled security-group extension.')
|
||||||
_disable_extension('security-group', aliases)
|
_disable_extension('security-group', aliases)
|
||||||
LOG.info(_LI('Disabled allowed-address-pairs extension.'))
|
LOG.info('Disabled allowed-address-pairs extension.')
|
||||||
_disable_extension('allowed-address-pairs', aliases)
|
_disable_extension('allowed-address-pairs', aliases)
|
||||||
|
|
||||||
|
|
||||||
@ -91,10 +90,10 @@ class SecurityGroupAgentRpc(object):
|
|||||||
self.plugin_rpc.security_group_info_for_devices(
|
self.plugin_rpc.security_group_info_for_devices(
|
||||||
self.context, devices=[])
|
self.context, devices=[])
|
||||||
except oslo_messaging.UnsupportedVersion:
|
except oslo_messaging.UnsupportedVersion:
|
||||||
LOG.warning(_LW('security_group_info_for_devices rpc call not '
|
LOG.warning('security_group_info_for_devices rpc call not '
|
||||||
'supported by the server, falling back to old '
|
'supported by the server, falling back to old '
|
||||||
'security_group_rules_for_devices which scales '
|
'security_group_rules_for_devices which scales '
|
||||||
'worse.'))
|
'worse.')
|
||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
@ -103,8 +102,8 @@ class SecurityGroupAgentRpc(object):
|
|||||||
def decorated_function(self, *args, **kwargs):
|
def decorated_function(self, *args, **kwargs):
|
||||||
if (isinstance(self.firewall, firewall.NoopFirewallDriver) or
|
if (isinstance(self.firewall, firewall.NoopFirewallDriver) or
|
||||||
not is_firewall_enabled()):
|
not is_firewall_enabled()):
|
||||||
LOG.info(_LI("Skipping method %s as firewall is disabled "
|
LOG.info("Skipping method %s as firewall is disabled "
|
||||||
"or configured as NoopFirewallDriver."),
|
"or configured as NoopFirewallDriver.",
|
||||||
func.__name__)
|
func.__name__)
|
||||||
else:
|
else:
|
||||||
return func(self, # pylint: disable=not-callable
|
return func(self, # pylint: disable=not-callable
|
||||||
@ -115,7 +114,7 @@ class SecurityGroupAgentRpc(object):
|
|||||||
def prepare_devices_filter(self, device_ids):
|
def prepare_devices_filter(self, device_ids):
|
||||||
if not device_ids:
|
if not device_ids:
|
||||||
return
|
return
|
||||||
LOG.info(_LI("Preparing filters for devices %s"), device_ids)
|
LOG.info("Preparing filters for devices %s", device_ids)
|
||||||
self._apply_port_filter(device_ids)
|
self._apply_port_filter(device_ids)
|
||||||
|
|
||||||
def _apply_port_filter(self, device_ids, update_filter=False):
|
def _apply_port_filter(self, device_ids, update_filter=False):
|
||||||
@ -155,16 +154,16 @@ class SecurityGroupAgentRpc(object):
|
|||||||
remote_sg_id, member_ips)
|
remote_sg_id, member_ips)
|
||||||
|
|
||||||
def security_groups_rule_updated(self, security_groups):
|
def security_groups_rule_updated(self, security_groups):
|
||||||
LOG.info(_LI("Security group "
|
LOG.info("Security group "
|
||||||
"rule updated %r"), security_groups)
|
"rule updated %r", security_groups)
|
||||||
self._security_group_updated(
|
self._security_group_updated(
|
||||||
security_groups,
|
security_groups,
|
||||||
'security_groups',
|
'security_groups',
|
||||||
'sg_rule')
|
'sg_rule')
|
||||||
|
|
||||||
def security_groups_member_updated(self, security_groups):
|
def security_groups_member_updated(self, security_groups):
|
||||||
LOG.info(_LI("Security group "
|
LOG.info("Security group "
|
||||||
"member updated %r"), security_groups)
|
"member updated %r", security_groups)
|
||||||
self._security_group_updated(
|
self._security_group_updated(
|
||||||
security_groups,
|
security_groups,
|
||||||
'security_group_source_groups',
|
'security_group_source_groups',
|
||||||
@ -188,7 +187,7 @@ class SecurityGroupAgentRpc(object):
             self.refresh_firewall(devices)

     def security_groups_provider_updated(self, port_ids_to_update):
-        LOG.info(_LI("Provider rule updated"))
+        LOG.info("Provider rule updated")
         if port_ids_to_update is None:
             # Update all devices
             if self.defer_refresh_firewall:
@ -211,7 +210,7 @@ class SecurityGroupAgentRpc(object):
     def remove_devices_filter(self, device_ids):
         if not device_ids:
             return
-        LOG.info(_LI("Remove device filter for %r"), device_ids)
+        LOG.info("Remove device filter for %r", device_ids)
         with self.firewall.defer_apply():
             for device_id in device_ids:
                 device = self.firewall.ports.get(device_id)
@ -222,11 +221,11 @@ class SecurityGroupAgentRpc(object):

     @skip_if_noopfirewall_or_firewall_disabled
     def refresh_firewall(self, device_ids=None):
-        LOG.info(_LI("Refresh firewall rules"))
+        LOG.info("Refresh firewall rules")
         if not device_ids:
             device_ids = self.firewall.ports.keys()
             if not device_ids:
-                LOG.info(_LI("No ports here to refresh firewall"))
+                LOG.info("No ports here to refresh firewall")
                 return
         self._apply_port_filter(device_ids, update_filter=True)

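The mechanical change in every hunk above repeats throughout the patch: the `_LI`/`_LW` marker is dropped, but the message arguments are still passed to the logger rather than pre-formatted into the string. A minimal sketch (not taken from the patch) of why that argument style is preserved:

    import logging

    LOG = logging.getLogger(__name__)
    device_ids = ['port-1', 'port-2']

    # Discouraged: the string is built eagerly, even when INFO is filtered out.
    LOG.info("Preparing filters for devices %s" % device_ids)

    # Style kept by this patch: the logger interpolates lazily, only when the
    # record is actually emitted; no translation marker is needed for logs.
    LOG.info("Preparing filters for devices %s", device_ids)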
@ -17,8 +17,6 @@ import netifaces

 from oslo_log import log as logging

-from neutron._i18n import _LE
-
 LOG = logging.getLogger(__name__)

 OPTS = []
@ -38,7 +36,7 @@ class IPWrapper(object):
         try:
             return [IPDevice(iface) for iface in netifaces.interfaces()]
         except (OSError, MemoryError):
-            LOG.error(_LE("Failed to get network interfaces."))
+            LOG.error("Failed to get network interfaces.")
             return []


@ -52,11 +50,11 @@ class IPDevice(object):
         try:
             device_addresses = netifaces.ifaddresses(self.name)
         except ValueError:
-            LOG.error(_LE("The device does not exist on the system: %s."),
+            LOG.error("The device does not exist on the system: %s.",
                       self.name)
             return
         except OSError:
-            LOG.error(_LE("Failed to get interface addresses: %s."),
+            LOG.error("Failed to get interface addresses: %s.",
                       self.name)
             return
         return device_addresses
@ -24,7 +24,7 @@ from oslo_serialization import jsonutils
 from six.moves.urllib import parse
 from webob import exc

-from neutron._i18n import _, _LW
+from neutron._i18n import _
 from neutron.api import extensions
 from neutron.common import constants
 from neutron import wsgi
@ -153,8 +153,8 @@ def _get_pagination_max_limit():
         if max_limit == 0:
             raise ValueError()
     except ValueError:
-        LOG.warning(_LW("Invalid value for pagination_max_limit: %s. It "
-                        "should be an integer greater to 0"),
+        LOG.warning("Invalid value for pagination_max_limit: %s. It "
+                    "should be an integer greater to 0",
                     cfg.CONF.pagination_max_limit)
     return max_limit

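Note that `_` stays imported here: the guideline exempts only log messages from translation, while user-facing strings such as exception messages still go through `_()`. A hedged sketch of the resulting split (the function and message below are made up for illustration):

    from neutron._i18n import _  # still used for user-facing text
    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)


    def validate_limit(limit):
        if limit < 0:
            # User-facing: translated, because it can surface in an API error.
            raise ValueError(_("Limit must be a non-negative integer."))
        # Operator-facing: plain string, per the log translation guideline.
        LOG.debug("Validated pagination limit: %d", limit)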
@ -26,7 +26,7 @@ import routes
 import webob.dec
 import webob.exc

-from neutron._i18n import _, _LE, _LI, _LW
+from neutron._i18n import _
 from neutron.common import exceptions
 from neutron import extensions as core_extensions
 from neutron.plugins.common import constants as const
@ -283,7 +283,7 @@ class ExtensionManager(object):
     """

     def __init__(self, path):
-        LOG.info(_LI('Initializing extension manager.'))
+        LOG.info('Initializing extension manager.')
         self.path = path
         self.extensions = {}
         self._load_all_extensions()
@ -359,10 +359,10 @@ class ExtensionManager(object):
                     break
         if exts_to_process:
             unloadable_extensions = set(exts_to_process.keys())
-            LOG.error(_LE("Unable to process extensions (%s) because "
-                          "the configured plugins do not satisfy "
-                          "their requirements. Some features will not "
-                          "work as expected."),
+            LOG.error("Unable to process extensions (%s) because "
+                      "the configured plugins do not satisfy "
+                      "their requirements. Some features will not "
+                      "work as expected.",
                       ', '.join(unloadable_extensions))
             self._check_faulty_extensions(unloadable_extensions)
         # Extending extensions' attributes map.
@ -398,7 +398,7 @@ class ExtensionManager(object):
                       'desc': extension.get_description(),
                       'updated': extension.get_updated()})
         except AttributeError:
-            LOG.exception(_LE("Exception loading extension"))
+            LOG.exception("Exception loading extension")
             return False
         return isinstance(extension, api_extensions.ExtensionDescriptor)

@ -417,7 +417,7 @@ class ExtensionManager(object):
         if os.path.exists(path):
             self._load_all_extensions_from_path(path)
         else:
-            LOG.error(_LE("Extension path '%s' doesn't exist!"), path)
+            LOG.error("Extension path '%s' doesn't exist!", path)

     def _load_all_extensions_from_path(self, path):
         # Sorting the extension list makes the order in which they
@ -433,16 +433,16 @@ class ExtensionManager(object):
                     ext_name = mod_name.capitalize()
                     new_ext_class = getattr(mod, ext_name, None)
                     if not new_ext_class:
-                        LOG.warning(_LW('Did not find expected name '
-                                        '"%(ext_name)s" in %(file)s'),
+                        LOG.warning('Did not find expected name '
+                                    '"%(ext_name)s" in %(file)s',
                                     {'ext_name': ext_name,
                                      'file': ext_path})
                         continue
                     new_ext = new_ext_class()
                     self.add_extension(new_ext)
             except Exception as exception:
-                LOG.warning(_LW("Extension file %(f)s wasn't loaded due to "
-                                "%(exception)s"),
+                LOG.warning("Extension file %(f)s wasn't loaded due to "
+                            "%(exception)s",
                             {'f': f, 'exception': exception})

     def add_extension(self, ext):
@ -451,7 +451,7 @@ class ExtensionManager(object):
             return

         alias = ext.get_alias()
-        LOG.info(_LI('Loaded extension: %s'), alias)
+        LOG.info('Loaded extension: %s', alias)

         if alias in self.extensions:
             raise exceptions.DuplicatedExtension(alias=alias)
@ -485,9 +485,8 @@ class PluginAwareExtensionManager(ExtensionManager):
         alias = extension.get_alias()
         supports_extension = alias in self.get_supported_extension_aliases()
         if not supports_extension:
-            LOG.info(_LI("Extension %s not supported by any of loaded "
-                         "plugins"),
-                     alias)
+            LOG.info("Extension %s not supported by any of loaded "
+                     "plugins", alias)
         return supports_extension

     def _plugins_implement_interface(self, extension):
@ -496,8 +495,8 @@ class PluginAwareExtensionManager(ExtensionManager):
         for plugin in self.plugins.values():
             if isinstance(plugin, extension.get_plugin_interface()):
                 return True
-        LOG.warning(_LW("Loaded plugins do not implement extension "
-                        "%s interface"),
+        LOG.warning("Loaded plugins do not implement extension "
+                    "%s interface",
                     extension.get_alias())
         return False

@ -22,7 +22,6 @@ from oslo_config import cfg
 from oslo_log import log as logging
 import oslo_messaging

-from neutron._i18n import _LE, _LW
 from neutron.common import constants as n_const
 from neutron.common import rpc as n_rpc
 from neutron.common import topics
@ -105,9 +104,9 @@ class DhcpAgentNotifyAPI(object):
                 context, 'network_create_end',
                 {'network': {'id': network['id']}}, agent['host'])
         elif not existing_agents:
-            LOG.warning(_LW('Unable to schedule network %s: no agents '
-                            'available; will retry on subsequent port '
-                            'and subnet creation events.'),
+            LOG.warning('Unable to schedule network %s: no agents '
+                        'available; will retry on subsequent port '
+                        'and subnet creation events.',
                         network['id'])
         return new_agents + existing_agents

@ -123,10 +122,10 @@ class DhcpAgentNotifyAPI(object):
         len_enabled_agents = len(enabled_agents)
         len_active_agents = len(active_agents)
         if len_active_agents < len_enabled_agents:
-            LOG.warning(_LW("Only %(active)d of %(total)d DHCP agents "
-                            "associated with network '%(net_id)s' "
-                            "are marked as active, so notifications "
-                            "may be sent to inactive agents."),
+            LOG.warning("Only %(active)d of %(total)d DHCP agents "
+                        "associated with network '%(net_id)s' "
+                        "are marked as active, so notifications "
+                        "may be sent to inactive agents.",
                         {'active': len_active_agents,
                          'total': len_enabled_agents,
                          'net_id': network_id})
@ -136,9 +135,9 @@ class DhcpAgentNotifyAPI(object):
             notification_required = (
                 num_ports > 0 and len(network['subnets']) >= 1)
             if notification_required:
-                LOG.error(_LE("Will not send event %(method)s for network "
-                              "%(net_id)s: no agent available. Payload: "
-                              "%(payload)s"),
+                LOG.error("Will not send event %(method)s for network "
+                          "%(net_id)s: no agent available. Payload: "
+                          "%(payload)s",
                           {'method': method,
                            'net_id': network_id,
                            'payload': payload})
@ -21,7 +21,6 @@ from neutron_lib.plugins import directory
 from oslo_log import log as logging
 import oslo_messaging

-from neutron._i18n import _LE
 from neutron.api.rpc.agentnotifiers import utils as ag_utils
 from neutron.common import rpc as n_rpc
 from neutron.common import topics
@ -88,8 +87,8 @@ class L3AgentNotifyAPI(object):
         """Notify all the agents that are hosting the routers."""
         plugin = directory.get_plugin(plugin_constants.L3)
         if not plugin:
-            LOG.error(_LE('No plugin for L3 routing registered. Cannot notify '
-                          'agents with the message %s'), method)
+            LOG.error('No plugin for L3 routing registered. Cannot notify '
+                      'agents with the message %s', method)
             return
         if utils.is_extension_supported(
                 plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
@ -17,8 +17,6 @@ from oslo_log import log as logging
 import oslo_messaging
 from oslo_utils import excutils

-from neutron._i18n import _LW
-
 LOG = logging.getLogger(__name__)


@ -42,8 +40,8 @@ def _call_with_retry(max_attempts):
                 with excutils.save_and_reraise_exception(
                         reraise=False) as ctxt:
                     LOG.warning(
-                        _LW('Failed to execute %(action)s. %(attempt)d out'
-                            ' of %(max_attempts)d'),
+                        'Failed to execute %(action)s. %(attempt)d out'
+                        ' of %(max_attempts)d',
                         {'attempt': attempt,
                          'max_attempts': max_attempts,
                          'action': action})
@ -28,7 +28,7 @@ from oslo_log import log as logging
 import oslo_messaging
 from oslo_utils import excutils

-from neutron._i18n import _, _LW
+from neutron._i18n import _
 from neutron.common import constants as n_const
 from neutron.common import exceptions as n_exc
 from neutron.common import utils
@ -120,9 +120,9 @@ class DhcpRpcCallback(object):
                     ctxt.reraise = True
             if ctxt.reraise:
                 net_id = port['port']['network_id']
-                LOG.warning(_LW("Action %(action)s for network %(net_id)s "
-                                "could not complete successfully: "
-                                "%(reason)s"),
+                LOG.warning("Action %(action)s for network %(net_id)s "
+                            "could not complete successfully: "
+                            "%(reason)s",
                             {"action": action,
                              "net_id": net_id,
                              'reason': e})
@ -19,7 +19,6 @@ from neutron_lib.utils import net
 from oslo_log import log as logging
 import oslo_messaging

-from neutron._i18n import _LW
 from neutron.api.rpc.handlers import resources_rpc
 from neutron.callbacks import events
 from neutron.callbacks import registry
@ -192,9 +191,9 @@ class SecurityGroupAgentRpcCallbackMixin(object):
     sg_agent = None

     def _security_groups_agent_not_set(self):
-        LOG.warning(_LW("Security group agent binding currently not set. "
-                        "This should be set by the end of the init "
-                        "process."))
+        LOG.warning("Security group agent binding currently not set. "
+                    "This should be set by the end of the init "
+                    "process.")

     def security_groups_rule_updated(self, context, **kwargs):
         """Callback for security group rule update.
@ -26,7 +26,7 @@ from oslo_policy import policy as oslo_policy
 from oslo_utils import excutils
 import webob.exc

-from neutron._i18n import _, _LE, _LI
+from neutron._i18n import _
 from neutron.api import api_common
 from neutron.api.v2 import resource as wsgi_resource
 from neutron.common import constants as n_const
@ -125,8 +125,8 @@ class Controller(object):
                 _("Native pagination depend on native sorting")
             )
         if not self._allow_sorting:
-            LOG.info(_LI("Allow sorting is enabled because native "
-                         "pagination requires native sorting"))
+            LOG.info("Allow sorting is enabled because native "
+                     "pagination requires native sorting")
             self._allow_sorting = True
         self.parent = parent
         if parent:
@ -419,8 +419,8 @@ class Controller(object):
                 except Exception:
                     # broad catch as our only purpose is to log the
                     # exception
-                    LOG.exception(_LE("Unable to undo add for "
-                                      "%(resource)s %(id)s"),
+                    LOG.exception("Unable to undo add for "
+                                  "%(resource)s %(id)s",
                                   {'resource': self._resource,
                                    'id': obj['id']})
         # TODO(salvatore-orlando): The object being processed when the
@ -21,7 +21,6 @@ from oslo_log import log as logging
 import webob.dec
 import webob.exc

-from neutron._i18n import _LE, _LI
 from neutron.api import api_common
 from neutron.common import utils
 from neutron import wsgi
@ -101,16 +100,15 @@ def Resource(controller, faults=None, deserializers=None, serializers=None,
             mapped_exc = api_common.convert_exception_to_http_exc(e, faults,
                                                                   language)
             if hasattr(mapped_exc, 'code') and 400 <= mapped_exc.code < 500:
-                LOG.info(_LI('%(action)s failed (client error): %(exc)s'),
+                LOG.info('%(action)s failed (client error): %(exc)s',
                          {'action': action, 'exc': mapped_exc})
             else:
-                LOG.exception(
-                    _LE('%(action)s failed: %(details)s'),
-                    {
-                        'action': action,
-                        'details': utils.extract_exc_details(e),
-                    }
-                )
+                LOG.exception('%(action)s failed: %(details)s',
+                              {
+                                  'action': action,
+                                  'details': utils.extract_exc_details(e),
+                              }
+                              )
             raise mapped_exc

         status = action_status.get(action, 200)
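Dropping `_LE` from `LOG.exception` calls does not change traceback handling: `exception()` still logs at ERROR level and appends the active exception info. A small self-contained sketch (standard library only, not from the patch):

    import logging

    logging.basicConfig(level=logging.INFO)
    LOG = logging.getLogger(__name__)


    def risky():
        raise RuntimeError("boom")


    try:
        risky()
    except Exception:
        # Same behavior as before the patch, minus the translation marker:
        # lazy interpolation plus the traceback of the active exception.
        LOG.exception('%(action)s failed: %(details)s',
                      {'action': 'create', 'details': 'no further details'})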
@ -16,7 +16,6 @@
 from oslo_config import cfg
 from oslo_log import log as logging

-from neutron._i18n import _LE, _LI
 from neutron.agent.linux import utils
 from neutron.common import config
 from neutron.conf.agent import cmd as command
@ -43,7 +42,7 @@ def remove_iptables_reference(ipset):

     if ipset in iptables_save:
         cmd = ['iptables'] if 'IPv4' in ipset else ['ip6tables']
-        LOG.info(_LI("Removing iptables rule for IPset: %s"), ipset)
+        LOG.info("Removing iptables rule for IPset: %s", ipset)
         for rule in iptables_save.splitlines():
             if '--match-set %s ' % ipset in rule and rule.startswith('-A'):
                 # change to delete
@ -52,8 +51,8 @@ def remove_iptables_reference(ipset):
                 try:
                     utils.execute(cmd + params, run_as_root=True)
                 except Exception:
-                    LOG.exception(_LE('Error, unable to remove iptables rule '
-                                      'for IPset: %s'), ipset)
+                    LOG.exception('Error, unable to remove iptables rule '
+                                  'for IPset: %s', ipset)


 def destroy_ipset(conf, ipset):
@ -62,17 +61,17 @@ def destroy_ipset(conf, ipset):
     if conf.force:
         remove_iptables_reference(ipset)

-    LOG.info(_LI("Destroying IPset: %s"), ipset)
+    LOG.info("Destroying IPset: %s", ipset)
     cmd = ['ipset', 'destroy', ipset]
     try:
         utils.execute(cmd, run_as_root=True)
     except Exception:
-        LOG.exception(_LE('Error, unable to destroy IPset: %s'), ipset)
+        LOG.exception('Error, unable to destroy IPset: %s', ipset)


 def cleanup_ipsets(conf):
     # Identify ipsets for destruction.
-    LOG.info(_LI("Destroying IPsets with prefix: %s"), conf.prefix)
+    LOG.info("Destroying IPsets with prefix: %s", conf.prefix)

     cmd = ['ipset', '-L', '-n']
     ipsets = utils.execute(cmd, run_as_root=True)
@ -80,7 +79,7 @@ def cleanup_ipsets(conf):
         if conf.allsets or ipset.startswith(conf.prefix):
             destroy_ipset(conf, ipset)

-    LOG.info(_LI("IPset cleanup completed successfully"))
+    LOG.info("IPset cleanup completed successfully")


 def main():
@ -16,7 +16,6 @@ from neutron_lib.utils import helpers
 from oslo_config import cfg
 from oslo_log import log as logging

-from neutron._i18n import _LE, _LI
 from neutron.common import config
 from neutron.plugins.ml2.drivers.linuxbridge.agent \
     import linuxbridge_neutron_agent
@ -30,17 +29,17 @@ def remove_empty_bridges():
         interface_mappings = helpers.parse_mappings(
             cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
     except ValueError as e:
-        LOG.error(_LE("Parsing physical_interface_mappings failed: %s."), e)
+        LOG.error("Parsing physical_interface_mappings failed: %s.", e)
         sys.exit(1)
-    LOG.info(_LI("Interface mappings: %s."), interface_mappings)
+    LOG.info("Interface mappings: %s.", interface_mappings)

     try:
         bridge_mappings = helpers.parse_mappings(
             cfg.CONF.LINUX_BRIDGE.bridge_mappings)
     except ValueError as e:
-        LOG.error(_LE("Parsing bridge_mappings failed: %s."), e)
+        LOG.error("Parsing bridge_mappings failed: %s.", e)
         sys.exit(1)
-    LOG.info(_LI("Bridge mappings: %s."), bridge_mappings)
+    LOG.info("Bridge mappings: %s.", bridge_mappings)

     lb_manager = linuxbridge_neutron_agent.LinuxBridgeManager(
         bridge_mappings, interface_mappings)
@ -52,10 +51,10 @@ def remove_empty_bridges():

         try:
             lb_manager.delete_bridge(bridge_name)
-            LOG.info(_LI("Linux bridge %s deleted"), bridge_name)
+            LOG.info("Linux bridge %s deleted", bridge_name)
         except RuntimeError:
-            LOG.exception(_LE("Linux bridge %s delete failed"), bridge_name)
-    LOG.info(_LI("Linux bridge cleanup completed successfully"))
+            LOG.exception("Linux bridge %s delete failed", bridge_name)
+    LOG.info("Linux bridge cleanup completed successfully")


 def main():
@ -23,7 +23,6 @@ from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_utils import importutils

-from neutron._i18n import _LE, _LW
 from neutron.agent.common import ovs_lib
 from neutron.agent.l3 import dvr_fip_ns
 from neutron.agent.l3 import dvr_snat_ns
@ -197,7 +196,7 @@ def _kill_listen_processes(namespace, force=False):
         # implementation in the right module. Ideally, netns_cleanup wouldn't
         # kill any processes as the responsible module should've killed them
         # before cleaning up the namespace
-        LOG.warning(_LW("Killing (%(signal)d) [%(pid)s] %(cmdline)s"),
+        LOG.warning("Killing (%(signal)d) [%(pid)s] %(cmdline)s",
                     {'signal': kill_signal,
                      'pid': pid,
                      'cmdline': ' '.join(utils.get_cmdline_from_pid(pid))[:80]
@ -205,8 +204,8 @@ def _kill_listen_processes(namespace, force=False):
         try:
             utils.kill_process(pid, kill_signal, run_as_root=True)
         except Exception as ex:
-            LOG.error(_LE('An error occurred while killing '
-                          '[%(pid)s]: %(msg)s'), {'pid': pid, 'msg': ex})
+            LOG.error('An error occurred while killing '
+                      '[%(pid)s]: %(msg)s', {'pid': pid, 'msg': ex})
     return len(pids)


@ -246,14 +245,14 @@ def destroy_namespace(conf, namespace, force=False):
                 # This is unlikely since, at this point, we have SIGKILLed
                 # all remaining processes but if there are still some, log
                 # the error and continue with the cleanup
-                LOG.error(_LE('Not all processes were killed in %s'),
+                LOG.error('Not all processes were killed in %s',
                           namespace)
             for device in ip.get_devices():
                 unplug_device(conf, device)

         ip.garbage_collect_namespace()
     except Exception:
-        LOG.exception(_LE('Error unable to destroy namespace: %s'), namespace)
+        LOG.exception('Error unable to destroy namespace: %s', namespace)


 def cleanup_network_namespaces(conf):
@ -16,7 +16,6 @@
 from oslo_config import cfg
 from oslo_log import log as logging

-from neutron._i18n import _LI
 from neutron.agent.common import ovs_lib
 from neutron.agent.linux import interface
 from neutron.agent.linux import ip_lib
@ -73,7 +72,7 @@ def delete_neutron_ports(ports):
         device = ip_lib.IPDevice(port)
         if device.exists():
             device.link.delete()
-            LOG.info(_LI("Deleting port: %s"), port)
+            LOG.info("Deleting port: %s", port)


 def main():
@ -103,7 +102,7 @@ def main():
     ports = collect_neutron_ports(available_configuration_bridges)

     for bridge in bridges:
-        LOG.info(_LI("Cleaning bridge: %s"), bridge)
+        LOG.info("Cleaning bridge: %s", bridge)
         ovs = ovs_lib.OVSBridge(bridge)
         if conf.ovs_all_ports:
             port_names = ovs.get_port_name_list()
@ -115,4 +114,4 @@ def main():
     # Remove remaining ports created by Neutron (usually veth pair)
     delete_neutron_ports(ports)

-    LOG.info(_LI("OVS cleanup completed successfully"))
+    LOG.info("OVS cleanup completed successfully")
@ -23,7 +23,6 @@ from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_utils import uuidutils

-from neutron._i18n import _LE
 from neutron.agent.common import ovs_lib
 from neutron.agent.l3 import ha_router
 from neutron.agent.l3 import namespaces
@ -104,8 +103,8 @@ def ofctl_arg_supported(cmd, **kwargs):
                   "command %s. Exception: %s", full_args, e)
         return False
     except Exception:
-        LOG.exception(_LE("Unexpected exception while checking supported"
-                          " feature via command: %s"), full_args)
+        LOG.exception("Unexpected exception while checking supported"
+                      " feature via command: %s", full_args)
         return False
     else:
         return True
@ -157,8 +156,8 @@ def _vf_management_support(required_caps):
             LOG.debug("ip link command does not support "
                       "vf capability '%(cap)s'", {'cap': cap})
     except ip_link_support.UnsupportedIpLinkCommand:
-        LOG.exception(_LE("Unexpected exception while checking supported "
-                          "ip link command"))
+        LOG.exception("Unexpected exception while checking supported "
+                      "ip link command")
         return False
     return is_supported

@ -362,11 +361,11 @@ def ovsdb_native_supported():
         ovs.get_bridges()
         return True
     except ImportError as ex:
-        LOG.error(_LE("Failed to import required modules. Ensure that the "
-                      "python-openvswitch package is installed. Error: %s"),
+        LOG.error("Failed to import required modules. Ensure that the "
+                  "python-openvswitch package is installed. Error: %s",
                   ex)
     except Exception:
-        LOG.exception(_LE("Unexpected exception occurred."))
+        LOG.exception("Unexpected exception occurred.")

     return False

|
|||||||
from oslo_config import cfg
|
from oslo_config import cfg
|
||||||
from oslo_log import log as logging
|
from oslo_log import log as logging
|
||||||
|
|
||||||
from neutron._i18n import _, _LE, _LW
|
from neutron._i18n import _
|
||||||
from neutron.agent import dhcp_agent
|
from neutron.agent import dhcp_agent
|
||||||
from neutron.cmd.sanity import checks
|
from neutron.cmd.sanity import checks
|
||||||
from neutron.common import config
|
from neutron.common import config
|
||||||
@ -52,52 +52,52 @@ class BoolOptCallback(cfg.BoolOpt):
|
|||||||
def check_ovs_vxlan():
|
def check_ovs_vxlan():
|
||||||
result = checks.ovs_vxlan_supported()
|
result = checks.ovs_vxlan_supported()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('Check for Open vSwitch VXLAN support failed. '
|
LOG.error('Check for Open vSwitch VXLAN support failed. '
|
||||||
'Please ensure that the version of openvswitch '
|
'Please ensure that the version of openvswitch '
|
||||||
'being used has VXLAN support.'))
|
'being used has VXLAN support.')
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def check_ovs_geneve():
|
def check_ovs_geneve():
|
||||||
result = checks.ovs_geneve_supported()
|
result = checks.ovs_geneve_supported()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('Check for Open vSwitch Geneve support failed. '
|
LOG.error('Check for Open vSwitch Geneve support failed. '
|
||||||
'Please ensure that the version of openvswitch '
|
'Please ensure that the version of openvswitch '
|
||||||
'and kernel being used has Geneve support.'))
|
'and kernel being used has Geneve support.')
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def check_iproute2_vxlan():
|
def check_iproute2_vxlan():
|
||||||
result = checks.iproute2_vxlan_supported()
|
result = checks.iproute2_vxlan_supported()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('Check for iproute2 VXLAN support failed. Please ensure '
|
LOG.error('Check for iproute2 VXLAN support failed. Please ensure '
|
||||||
'that the iproute2 has VXLAN support.'))
|
'that the iproute2 has VXLAN support.')
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def check_ovs_patch():
|
def check_ovs_patch():
|
||||||
result = checks.patch_supported()
|
result = checks.patch_supported()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('Check for Open vSwitch patch port support failed. '
|
LOG.error('Check for Open vSwitch patch port support failed. '
|
||||||
'Please ensure that the version of openvswitch '
|
'Please ensure that the version of openvswitch '
|
||||||
'being used has patch port support or disable features '
|
'being used has patch port support or disable features '
|
||||||
'requiring patch ports (gre/vxlan, etc.).'))
|
'requiring patch ports (gre/vxlan, etc.).')
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def check_read_netns():
|
def check_read_netns():
|
||||||
required = checks.netns_read_requires_helper()
|
required = checks.netns_read_requires_helper()
|
||||||
if not required and cfg.CONF.AGENT.use_helper_for_ns_read:
|
if not required and cfg.CONF.AGENT.use_helper_for_ns_read:
|
||||||
LOG.warning(_LW("The user that is executing neutron can read the "
|
LOG.warning("The user that is executing neutron can read the "
|
||||||
"namespaces without using the root_helper. Disable "
|
"namespaces without using the root_helper. Disable "
|
||||||
"the use_helper_for_ns_read option to avoid a "
|
"the use_helper_for_ns_read option to avoid a "
|
||||||
"performance impact."))
|
"performance impact.")
|
||||||
# Don't fail because nothing is actually broken. Just not optimal.
|
# Don't fail because nothing is actually broken. Just not optimal.
|
||||||
result = True
|
result = True
|
||||||
elif required and not cfg.CONF.AGENT.use_helper_for_ns_read:
|
elif required and not cfg.CONF.AGENT.use_helper_for_ns_read:
|
||||||
LOG.error(_LE("The user that is executing neutron does not have "
|
LOG.error("The user that is executing neutron does not have "
|
||||||
"permissions to read the namespaces. Enable the "
|
"permissions to read the namespaces. Enable the "
|
||||||
"use_helper_for_ns_read configuration option."))
|
"use_helper_for_ns_read configuration option.")
|
||||||
result = False
|
result = False
|
||||||
else:
|
else:
|
||||||
# everything is configured appropriately
|
# everything is configured appropriately
|
||||||
@ -112,8 +112,8 @@ def check_read_netns():
|
|||||||
def check_dnsmasq_version():
|
def check_dnsmasq_version():
|
||||||
result = checks.dnsmasq_version_supported()
|
result = checks.dnsmasq_version_supported()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('The installed version of dnsmasq is too old. '
|
LOG.error('The installed version of dnsmasq is too old. '
|
||||||
'Please update to at least version %s.'),
|
'Please update to at least version %s.',
|
||||||
checks.get_minimal_dnsmasq_version_supported())
|
checks.get_minimal_dnsmasq_version_supported())
|
||||||
return result
|
return result
|
||||||
|
|
||||||
@ -121,17 +121,17 @@ def check_dnsmasq_version():
|
|||||||
def check_keepalived_ipv6_support():
|
def check_keepalived_ipv6_support():
|
||||||
result = checks.keepalived_ipv6_supported()
|
result = checks.keepalived_ipv6_supported()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('The installed version of keepalived does not support '
|
LOG.error('The installed version of keepalived does not support '
|
||||||
'IPv6. Please update to at least version 1.2.10 for '
|
'IPv6. Please update to at least version 1.2.10 for '
|
||||||
'IPv6 support.'))
|
'IPv6 support.')
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def check_dibbler_version():
|
def check_dibbler_version():
|
||||||
result = checks.dibbler_version_supported()
|
result = checks.dibbler_version_supported()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('The installed version of dibbler-client is too old. '
|
LOG.error('The installed version of dibbler-client is too old. '
|
||||||
'Please update to at least version %s.'),
|
'Please update to at least version %s.',
|
||||||
checks.get_minimal_dibbler_version_supported())
|
checks.get_minimal_dibbler_version_supported())
|
||||||
return result
|
return result
|
||||||
|
|
||||||
@ -139,56 +139,56 @@ def check_dibbler_version():
|
|||||||
def check_nova_notify():
|
def check_nova_notify():
|
||||||
result = checks.nova_notify_supported()
|
result = checks.nova_notify_supported()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('Nova notifications are enabled, but novaclient is not '
|
LOG.error('Nova notifications are enabled, but novaclient is not '
|
||||||
'installed. Either disable nova notifications or '
|
'installed. Either disable nova notifications or '
|
||||||
'install python-novaclient.'))
|
'install python-novaclient.')
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def check_arp_responder():
|
def check_arp_responder():
|
||||||
result = checks.arp_responder_supported()
|
result = checks.arp_responder_supported()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('Check for Open vSwitch ARP responder support failed. '
|
LOG.error('Check for Open vSwitch ARP responder support failed. '
|
||||||
'Please ensure that the version of openvswitch '
|
'Please ensure that the version of openvswitch '
|
||||||
'being used has ARP flows support.'))
|
'being used has ARP flows support.')
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def check_arp_header_match():
|
def check_arp_header_match():
|
||||||
result = checks.arp_header_match_supported()
|
result = checks.arp_header_match_supported()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('Check for Open vSwitch support of ARP header matching '
|
LOG.error('Check for Open vSwitch support of ARP header matching '
|
||||||
'failed. ARP spoofing suppression will not work. A '
|
'failed. ARP spoofing suppression will not work. A '
|
||||||
'newer version of OVS is required.'))
|
'newer version of OVS is required.')
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def check_icmpv6_header_match():
|
def check_icmpv6_header_match():
|
||||||
result = checks.icmpv6_header_match_supported()
|
result = checks.icmpv6_header_match_supported()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('Check for Open vSwitch support of ICMPv6 header '
|
LOG.error('Check for Open vSwitch support of ICMPv6 header '
|
||||||
'matching failed. ICMPv6 Neighbor Advt spoofing (part '
|
'matching failed. ICMPv6 Neighbor Advt spoofing (part '
|
||||||
'of arp spoofing) suppression will not work. A newer '
|
'of arp spoofing) suppression will not work. A newer '
|
||||||
'version of OVS is required.'))
|
'version of OVS is required.')
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def check_vf_management():
|
def check_vf_management():
|
||||||
result = checks.vf_management_supported()
|
result = checks.vf_management_supported()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('Check for VF management support failed. '
|
LOG.error('Check for VF management support failed. '
|
||||||
'Please ensure that the version of ip link '
|
'Please ensure that the version of ip link '
|
||||||
'being used has VF support.'))
|
'being used has VF support.')
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def check_vf_extended_management():
|
def check_vf_extended_management():
|
||||||
result = checks.vf_extended_management_supported()
|
result = checks.vf_extended_management_supported()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('Check for VF extended management support failed. '
|
LOG.error('Check for VF extended management support failed. '
|
||||||
'Please ensure that the version of ip link '
|
'Please ensure that the version of ip link '
|
||||||
'being used has VF extended support: version '
|
'being used has VF extended support: version '
|
||||||
'"iproute2-ss140804", git tag "v3.16.0"'))
|
'"iproute2-ss140804", git tag "v3.16.0"')
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
@ -196,67 +196,67 @@ def check_ovsdb_native():
|
|||||||
cfg.CONF.set_override('ovsdb_interface', 'native', group='OVS')
|
cfg.CONF.set_override('ovsdb_interface', 'native', group='OVS')
|
||||||
result = checks.ovsdb_native_supported()
|
result = checks.ovsdb_native_supported()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('Check for native OVSDB support failed.'))
|
LOG.error('Check for native OVSDB support failed.')
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def check_ovs_conntrack():
|
def check_ovs_conntrack():
|
||||||
result = checks.ovs_conntrack_supported()
|
result = checks.ovs_conntrack_supported()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('Check for Open vSwitch support of conntrack support '
|
LOG.error('Check for Open vSwitch support of conntrack support '
|
||||||
'failed. OVS/CT firewall will not work. A newer '
|
'failed. OVS/CT firewall will not work. A newer '
|
||||||
'version of OVS (2.5+) and linux kernel (4.3+) are '
|
'version of OVS (2.5+) and linux kernel (4.3+) are '
|
||||||
'required. See '
|
'required. See '
|
||||||
'https://github.com/openvswitch/ovs/blob/master/FAQ.md '
|
'https://github.com/openvswitch/ovs/blob/master/FAQ.md '
|
||||||
'for more information.'))
|
'for more information.')
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def check_ebtables():
|
def check_ebtables():
|
||||||
result = checks.ebtables_supported()
|
result = checks.ebtables_supported()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('Cannot run ebtables. Please ensure that it '
|
LOG.error('Cannot run ebtables. Please ensure that it '
|
||||||
'is installed.'))
|
'is installed.')
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def check_ipset():
|
def check_ipset():
|
||||||
result = checks.ipset_supported()
|
result = checks.ipset_supported()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('Cannot run ipset. Please ensure that it '
|
LOG.error('Cannot run ipset. Please ensure that it '
|
||||||
'is installed.'))
|
'is installed.')
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def check_ip6tables():
|
def check_ip6tables():
|
||||||
result = checks.ip6tables_supported()
|
result = checks.ip6tables_supported()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('Cannot run ip6tables. Please ensure that it '
|
LOG.error('Cannot run ip6tables. Please ensure that it '
|
||||||
'is installed.'))
|
'is installed.')
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def check_conntrack():
|
def check_conntrack():
|
||||||
result = checks.conntrack_supported()
|
result = checks.conntrack_supported()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('Cannot run conntrack. Please ensure that it '
|
LOG.error('Cannot run conntrack. Please ensure that it '
|
||||||
'is installed.'))
|
'is installed.')
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def check_dhcp_release6():
|
def check_dhcp_release6():
|
||||||
result = checks.dhcp_release6_supported()
|
result = checks.dhcp_release6_supported()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('No dhcp_release6 tool detected. The installed version '
|
LOG.error('No dhcp_release6 tool detected. The installed version '
|
||||||
'of dnsmasq does not support releasing IPv6 leases. '
|
'of dnsmasq does not support releasing IPv6 leases. '
|
||||||
'Please update to at least version %s if you need this '
|
'Please update to at least version %s if you need this '
|
||||||
'feature. If you do not use IPv6 stateful subnets you '
|
'feature. If you do not use IPv6 stateful subnets you '
|
||||||
'can continue to use this version of dnsmasq, as '
|
'can continue to use this version of dnsmasq, as '
|
||||||
'other IPv6 address assignment mechanisms besides '
|
'other IPv6 address assignment mechanisms besides '
|
||||||
'stateful DHCPv6 should continue to work without '
|
'stateful DHCPv6 should continue to work without '
|
||||||
'the dhcp_release6 utility. '
|
'the dhcp_release6 utility. '
|
||||||
'Current version of dnsmasq is ok if other checks '
|
'Current version of dnsmasq is ok if other checks '
|
||||||
'pass.'),
|
'pass.',
|
||||||
checks.get_dnsmasq_version_with_dhcp_release6())
|
checks.get_dnsmasq_version_with_dhcp_release6())
|
||||||
return result
|
return result
|
||||||
|
|
||||||
@ -264,19 +264,19 @@ def check_dhcp_release6():
|
|||||||
def check_bridge_firewalling_enabled():
|
def check_bridge_firewalling_enabled():
|
||||||
result = checks.bridge_firewalling_enabled()
|
result = checks.bridge_firewalling_enabled()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('Bridge firewalling is not enabled. It may be the case '
|
LOG.error('Bridge firewalling is not enabled. It may be the case '
|
||||||
'that bridge and/or br_netfilter kernel modules are not '
|
'that bridge and/or br_netfilter kernel modules are not '
|
||||||
'loaded. Alternatively, corresponding sysctl settings '
|
'loaded. Alternatively, corresponding sysctl settings '
|
||||||
'may be overridden to disable it by default.'))
|
'may be overridden to disable it by default.')
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
def check_ip_nonlocal_bind():
|
def check_ip_nonlocal_bind():
|
||||||
result = checks.ip_nonlocal_bind()
|
result = checks.ip_nonlocal_bind()
|
||||||
if not result:
|
if not result:
|
||||||
LOG.error(_LE('This kernel does not isolate ip_nonlocal_bind kernel '
|
LOG.error('This kernel does not isolate ip_nonlocal_bind kernel '
|
||||||
'option in namespaces. Please update to kernel '
|
'option in namespaces. Please update to kernel '
|
||||||
'version > 3.19.'))
|
'version > 3.19.')
|
||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
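Every sanity check above keeps the same shape after the change: probe a capability, log a plain untranslated error on failure, and return a boolean for the caller to aggregate. A hedged sketch of that shape with a made-up check (the probe here is a stub, not a real `checks.*` helper):

    import logging

    LOG = logging.getLogger(__name__)


    def example_feature_supported():
        # Stub standing in for a checks.*_supported() probe.
        return False


    def check_example_feature():
        result = example_feature_supported()
        if not result:
            LOG.error('Check for example feature failed. Please ensure '
                      'the required tooling is installed.')
        return result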
@ -27,7 +27,7 @@ import oslo_messaging
 from oslo_middleware import cors
 from oslo_service import wsgi

-from neutron._i18n import _, _LI
+from neutron._i18n import _
 from neutron.conf import common as common_config
 from neutron import policy
 from neutron import version
@ -97,8 +97,8 @@ def setup_logging():
     logging.set_defaults(default_log_levels=logging.get_default_log_levels() +
                          EXTRA_LOG_LEVEL_DEFAULTS)
     logging.setup(cfg.CONF, product_name)
-    LOG.info(_LI("Logging enabled!"))
-    LOG.info(_LI("%(prog)s version %(version)s"),
+    LOG.info("Logging enabled!")
+    LOG.info("%(prog)s version %(version)s",
              {'prog': sys.argv[0],
               'version': version.version_info.release_string()})
     LOG.debug("command line: %s", " ".join(sys.argv))
@ -23,8 +23,6 @@ import netaddr
 from neutron_lib import constants as const
 from oslo_log import log

-from neutron._i18n import _LI
-
 LOG = log.getLogger(__name__)
 _IS_IPV6_ENABLED = None
@ -45,10 +43,10 @@ def is_enabled_and_bind_by_default():
         else:
             _IS_IPV6_ENABLED = False
         if not _IS_IPV6_ENABLED:
-            LOG.info(_LI("IPv6 not present or configured not to bind to new "
-                         "interfaces on this system. Please ensure IPv6 is "
-                         "enabled and /proc/sys/net/ipv6/conf/default/"
-                         "disable_ipv6 is set to 0 to enable IPv6."))
+            LOG.info("IPv6 not present or configured not to bind to new "
+                     "interfaces on this system. Please ensure IPv6 is "
+                     "enabled and /proc/sys/net/ipv6/conf/default/"
+                     "disable_ipv6 is set to 0 to enable IPv6.")
     return _IS_IPV6_ENABLED

@ -17,8 +17,6 @@ import osprofiler.initializer
 from osprofiler import opts as profiler_opts
 import osprofiler.web

-from neutron._i18n import _LI
-
 CONF = cfg.CONF
 profiler_opts.set_defaults(CONF)
@ -41,11 +39,11 @@ def setup(name, host='0.0.0.0'):  # nosec
         service=name,
         host=host
     )
-    LOG.info(_LI("OSProfiler is enabled.\n"
-                 "Traces provided from the profiler "
-                 "can only be subscribed to using the same HMAC keys that "
-                 "are configured in Neutron's configuration file "
-                 "under the [profiler] section.\n To disable OSprofiler "
-                 "set in /etc/neutron/neutron.conf:\n"
-                 "[profiler]\n"
-                 "enabled=false"))
+    LOG.info("OSProfiler is enabled.\n"
+             "Traces provided from the profiler "
+             "can only be subscribed to using the same HMAC keys that "
+             "are configured in Neutron's configuration file "
+             "under the [profiler] section.\n To disable OSprofiler "
+             "set in /etc/neutron/neutron.conf:\n"
+             "[profiler]\n"
+             "enabled=false")
@ -29,7 +29,6 @@ from oslo_service import service
 from oslo_utils import excutils
 from osprofiler import profiler

-from neutron._i18n import _LE, _LW
 from neutron.common import exceptions


@ -168,19 +167,19 @@ class _BackingOffContextWrapper(_ContextWrapper):
                     min(self._METHOD_TIMEOUTS[scoped_method],
                         TRANSPORT.conf.rpc_response_timeout)
                 )
-                LOG.error(_LE("Timeout in RPC method %(method)s. Waiting for "
-                              "%(wait)s seconds before next attempt. If the "
-                              "server is not down, consider increasing the "
-                              "rpc_response_timeout option as Neutron "
-                              "server(s) may be overloaded and unable to "
-                              "respond quickly enough."),
+                LOG.error("Timeout in RPC method %(method)s. Waiting for "
+                          "%(wait)s seconds before next attempt. If the "
+                          "server is not down, consider increasing the "
+                          "rpc_response_timeout option as Neutron "
+                          "server(s) may be overloaded and unable to "
+                          "respond quickly enough.",
                           {'wait': int(round(wait)), 'method': scoped_method})
                 new_timeout = min(
                     self._original_context.timeout * 2, self.get_max_timeout())
                 if new_timeout > self._METHOD_TIMEOUTS[scoped_method]:
-                    LOG.warning(_LW("Increasing timeout for %(method)s calls "
-                                    "to %(new)s seconds. Restart the agent to "
-                                    "restore it to the default value."),
+                    LOG.warning("Increasing timeout for %(method)s calls "
+                                "to %(new)s seconds. Restart the agent to "
+                                "restore it to the default value.",
                                 {'method': scoped_method, 'new': new_timeout})
                     self._METHOD_TIMEOUTS[scoped_method] = new_timeout
                 time.sleep(wait)
@ -49,7 +49,7 @@ import six
 from stevedore import driver

 import neutron
-from neutron._i18n import _, _LE
+from neutron._i18n import _
 from neutron.db import api as db_api

 TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
@ -307,7 +307,7 @@ def load_class_by_alias_or_classname(namespace, name):
     """

     if not name:
-        LOG.error(_LE("Alias or class name is not set"))
+        LOG.error("Alias or class name is not set")
         raise ImportError(_("Class not found."))
     try:
         # Try to resolve class by alias
@ -320,9 +320,9 @@ def load_class_by_alias_or_classname(namespace, name):
         try:
             class_to_load = importutils.import_class(name)
         except (ImportError, ValueError):
-            LOG.error(_LE("Error loading class by alias"),
+            LOG.error("Error loading class by alias",
                       exc_info=e1_info)
-            LOG.error(_LE("Error loading class by class name"),
+            LOG.error("Error loading class by class name",
                       exc_info=True)
             raise ImportError(_("Class not found."))
     return class_to_load
@ -636,7 +636,7 @@ def create_object_with_dependency(creator, dep_getter, dep_creator,
                 try:
                     dep_deleter(dependency)
                 except Exception:
-                    LOG.exception(_LE("Failed cleaning up dependency %s"),
+                    LOG.exception("Failed cleaning up dependency %s",
                                   dep_id)
     return result, dependency

@ -743,7 +743,7 @@ def attach_exc_details(e, msg, args=_NO_ARGS_MARKER):
 def extract_exc_details(e):
     for attr in ('_error_context_msg', '_error_context_args'):
         if not hasattr(e, attr):
-            return _LE('No details.')
+            return u'No details.'
     details = e._error_context_msg
     args = e._error_context_args
     if args is _NO_ARGS_MARKER:
@ -22,7 +22,6 @@ from oslo_log import log as logging
 from oslo_utils import excutils
 from sqlalchemy.ext import associationproxy

-from neutron._i18n import _LE

 LOG = logging.getLogger(__name__)

@ -67,9 +66,9 @@ def safe_creation(context, create_fn, delete_fn, create_bindings,
         try:
             delete_fn(obj['id'])
         except Exception as e:
-            LOG.error(_LE("Cannot clean up created object %(obj)s. "
-                          "Exception: %(exc)s"), {'obj': obj['id'],
+            LOG.error("Cannot clean up created object %(obj)s. "
+                      "Exception: %(exc)s", {'obj': obj['id'],
                                                  'exc': e})
     return obj, value
@ -33,7 +33,7 @@ from oslo_utils import timeutils
 from sqlalchemy.orm import exc
 from sqlalchemy import sql

-from neutron._i18n import _, _LE, _LI, _LW
+from neutron._i18n import _
 from neutron.agent.common import utils
 from neutron.api.rpc.callbacks import version_manager
 from neutron.common import constants as n_const

@ -163,7 +163,7 @@ class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin):
             return

        if utils.is_agent_down(agent.heartbeat_timestamp):
-            LOG.warning(_LW('%(agent_type)s agent %(agent_id)s is not active'),
+            LOG.warning('%(agent_type)s agent %(agent_id)s is not active',
                         {'agent_type': agent_type, 'agent_id': agent.id})
        return agent

@ -193,8 +193,8 @@ class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin):
             conf = jsonutils.loads(json_value)
         except Exception:
             if json_value or not ignore_missing:
-                msg = _LW('Dictionary %(dict_name)s for agent %(agent_type)s '
+                msg = ('Dictionary %(dict_name)s for agent %(agent_type)s '
                        'on host %(host)s is invalid.')
                 LOG.warning(msg, {'dict_name': dict_name,
                                   'agent_type': agent_db.agent_type,
                                   'host': agent_db.host})

@ -271,8 +271,8 @@ class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin):
                          (agent['agent_type'],
                           agent['heartbeat_timestamp'],
                           agent['host']) for agent in dead_agents])
-            LOG.warning(_LW("Agent healthcheck: found %(count)s dead agents "
-                            "out of %(total)s:\n%(data)s"),
+            LOG.warning("Agent healthcheck: found %(count)s dead agents "
+                        "out of %(total)s:\n%(data)s",
                         {'count': len(dead_agents),
                          'total': len(agents),
                          'data': data})

@ -314,8 +314,8 @@ class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin):
     def _log_heartbeat(self, state, agent_db, agent_conf):
         if agent_conf.get('log_agent_heartbeats'):
             delta = timeutils.utcnow() - agent_db.heartbeat_timestamp
-            LOG.info(_LI("Heartbeat received from %(type)s agent on "
-                         "host %(host)s, uuid %(uuid)s after %(delta)s"),
+            LOG.info("Heartbeat received from %(type)s agent on "
+                     "host %(host)s, uuid %(uuid)s after %(delta)s",
                      {'type': agent_db.agent_type,
                       'host': agent_db.host,
                       'uuid': state.get('uuid'),

@ -492,10 +492,10 @@ class AgentExtRpcCallback(object):
                         'serv_time': (datetime.datetime.isoformat
                                       (time_server_now)),
                         'diff': diff}
-            LOG.error(_LE("Message received from the host: %(host)s "
+            LOG.error("Message received from the host: %(host)s "
                       "during the registration of %(agent_name)s has "
                       "a timestamp: %(agent_time)s. This differs from "
                       "the current server timestamp: %(serv_time)s by "
                       "%(diff)s seconds, which is more than the "
                       "threshold agent down"
-                      "time: %(threshold)s."), log_dict)
+                      "time: %(threshold)s.", log_dict)
@ -26,7 +26,7 @@ from oslo_utils import timeutils
 from sqlalchemy import orm
 from sqlalchemy.orm import exc

-from neutron._i18n import _, _LE, _LI, _LW
+from neutron._i18n import _
 from neutron.agent.common import utils as agent_utils
 from neutron.common import constants as n_const
 from neutron.common import utils

@ -130,10 +130,10 @@ class AgentSchedulerDbMixin(agents_db.AgentDbMixin):
         tdelta = timeutils.utcnow() - getattr(self, '_clock_jump_canary',
                                               timeutils.utcnow())
         if tdelta.total_seconds() > cfg.CONF.agent_down_time:
-            LOG.warning(_LW("Time since last %s agent reschedule check has "
+            LOG.warning("Time since last %s agent reschedule check has "
                         "exceeded the interval between checks. Waiting "
                         "before check to allow agents to send a heartbeat "
-                        "in case there was a clock adjustment."),
+                        "in case there was a clock adjustment.",
                         agent_type)
             time.sleep(agent_dead_limit)
         self._clock_jump_canary = timeutils.utcnow()

@ -176,10 +176,10 @@ class AgentSchedulerDbMixin(agents_db.AgentDbMixin):
                     agents_back_online.add(binding_agent_id)
                     continue

-                LOG.warning(_LW(
+                LOG.warning(
                     "Rescheduling %(resource_name)s %(resource)s from agent "
                     "%(agent)s because the agent did not report to the server "
-                    "in the last %(dead_time)s seconds."),
+                    "in the last %(dead_time)s seconds.",
                     {'resource_name': resource_name,
                      'resource': binding_resource_id,
                      'agent': binding_agent_id,

@ -189,15 +189,15 @@ class AgentSchedulerDbMixin(agents_db.AgentDbMixin):
             except (rescheduling_failed, oslo_messaging.RemoteError):
                 # Catch individual rescheduling errors here
                 # so one broken one doesn't stop the iteration.
-                LOG.exception(_LE("Failed to reschedule %(resource_name)s "
-                                  "%(resource)s"),
+                LOG.exception("Failed to reschedule %(resource_name)s "
+                              "%(resource)s",
                               {'resource_name': resource_name,
                                'resource': binding_resource_id})
             except Exception:
                 # we want to be thorough and catch whatever is raised
                 # to avoid loop abortion
-                LOG.exception(_LE("Exception encountered during %(resource_name)s "
-                                  "rescheduling."),
+                LOG.exception("Exception encountered during %(resource_name)s "
+                              "rescheduling.",
                               {'resource_name': resource_name})


@ -211,8 +211,8 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler

     def add_periodic_dhcp_agent_status_check(self):
         if not cfg.CONF.allow_automatic_dhcp_failover:
-            LOG.info(_LI("Skipping periodic DHCP agent status check because "
-                         "automatic network rescheduling is disabled."))
+            LOG.info("Skipping periodic DHCP agent status check because "
+                     "automatic network rescheduling is disabled.")
             return

         self.add_agent_status_check_worker(

@ -249,23 +249,23 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler
         return agent_expected_up > timeutils.utcnow()

     def _schedule_network(self, context, network_id, dhcp_notifier):
-        LOG.info(_LI("Scheduling unhosted network %s"), network_id)
+        LOG.info("Scheduling unhosted network %s", network_id)
         try:
             # TODO(enikanorov): have to issue redundant db query
             # to satisfy scheduling interface
             network = self.get_network(context, network_id)
             agents = self.schedule_network(context, network)
             if not agents:
-                LOG.info(_LI("Failed to schedule network %s, "
+                LOG.info("Failed to schedule network %s, "
                          "no eligible agents or it might be "
-                         "already scheduled by another server"),
+                         "already scheduled by another server",
                          network_id)
                 return
             if not dhcp_notifier:
                 return
             for agent in agents:
-                LOG.info(_LI("Adding network %(net)s to agent "
-                             "%(agent)s on host %(host)s"),
+                LOG.info("Adding network %(net)s to agent "
+                         "%(agent)s on host %(host)s",
                          {'net': network_id,
                           'agent': agent.id,
                           'host': agent.host})

@ -275,7 +275,7 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler
             # catching any exception during scheduling
             # so if _schedule_network is invoked in the loop it could
             # continue in any case
-            LOG.exception(_LE("Failed to schedule network %s"), network_id)
+            LOG.exception("Failed to schedule network %s", network_id)

     def _filter_bindings(self, context, bindings):
         """Skip bindings for which the agent is dead, but starting up."""

@ -332,14 +332,14 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler
         active_agents = [agent for agent in agents if
                          self.is_eligible_agent(context, True, agent)]
         if not active_agents:
-            LOG.warning(_LW("No DHCP agents available, "
-                            "skipping rescheduling"))
+            LOG.warning("No DHCP agents available, "
+                        "skipping rescheduling")
             return
         for binding in dead_bindings:
-            LOG.warning(_LW("Removing network %(network)s from agent "
+            LOG.warning("Removing network %(network)s from agent "
                         "%(agent)s because the agent did not report "
                         "to the server in the last %(dead_time)s "
-                        "seconds."),
+                        "seconds.",
                         {'network': binding.network_id,
                          'agent': binding.dhcp_agent_id,
                          'dead_time': agent_dead_limit})

@ -362,9 +362,9 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler
                     saved_binding)
                 # still continue and allow concurrent scheduling attempt
             except Exception:
-                LOG.exception(_LE("Unexpected exception occurred while "
+                LOG.exception("Unexpected exception occurred while "
                               "removing network %(net)s from agent "
-                              "%(agent)s"),
+                              "%(agent)s",
                               saved_binding)

         if cfg.CONF.network_auto_schedule:

@ -373,8 +373,8 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler
             except Exception:
                 # we want to be thorough and catch whatever is raised
                 # to avoid loop abortion
-                LOG.exception(_LE("Exception encountered during network "
-                                  "rescheduling"))
+                LOG.exception("Exception encountered during network "
+                              "rescheduling")

     def get_dhcp_agents_hosting_networks(
             self, context, network_ids, active=None, admin_state_up=None,
@ -35,7 +35,6 @@ from sqlalchemy import exc as sql_exc
 from sqlalchemy import orm
 from sqlalchemy.orm import exc

-from neutron._i18n import _LE
 from neutron.objects import exceptions as obj_exc


@ -148,8 +147,8 @@ def retry_if_session_inactive(context_var_name='context'):
         # functions
         ctx_arg_index = p_util.getargspec(f).args.index(context_var_name)
     except ValueError:
-        raise RuntimeError(_LE("Could not find position of var %s")
-                           % context_var_name)
+        raise RuntimeError("Could not find position of var %s" %
+                           context_var_name)
     f_with_retry = retry_db_errors(f)

     @six.wraps(f)
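This hunk also shows the one asymmetry the cleanup respects: log calls keep deferred interpolation (arguments are handed to the logger), while exception messages are %-formatted eagerly, since the exception object must carry a finished string. A minimal illustration (the variable name is assumed):

    import logging

    LOG = logging.getLogger(__name__)
    name = 'ctx'

    # Deferred: the logger interpolates only if the record is emitted.
    LOG.error("Could not find position of var %s", name)

    # Eager: the exception holds the final message itself.
    raise RuntimeError("Could not find position of var %s" % name)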
@ -37,7 +37,7 @@ from sqlalchemy import and_
 from sqlalchemy import exc as sql_exc
 from sqlalchemy import not_

-from neutron._i18n import _, _LE, _LI
+from neutron._i18n import _
 from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
 from neutron.common import constants as n_const
 from neutron.common import exceptions as n_exc

@ -356,8 +356,8 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
                 objects.append(obj_creator(context, item))
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    LOG.error(_LE("An exception occurred while creating "
-                                  "the %(resource)s:%(item)s"),
+                    LOG.error("An exception occurred while creating "
+                              "the %(resource)s:%(item)s",
                               {'resource': resource, 'item': item})
         return objects

@ -968,9 +968,9 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
     def _ensure_no_user_ports_on_subnet(self, context, id):
         alloc = self._subnet_get_user_allocation(context, id)
         if alloc:
-            LOG.info(_LI("Found port (%(port_id)s, %(ip)s) having IP "
+            LOG.info("Found port (%(port_id)s, %(ip)s) having IP "
                      "allocation on subnet "
-                     "%(subnet)s, cannot delete"),
+                     "%(subnet)s, cannot delete",
                      {'ip': alloc.ip_address,
                       'port_id': alloc.port_id,
                       'subnet': id})
@ -18,7 +18,7 @@ from neutron_lib import exceptions as n_exc
 from oslo_config import cfg
 from oslo_log import log as logging

-from neutron._i18n import _, _LE
+from neutron._i18n import _
 from neutron.common import utils
 from neutron.db import _resource_extend as resource_extend
 from neutron.extensions import dns

@ -59,8 +59,8 @@ class DNSDbMixin(object):
                     cfg.CONF.external_dns_driver)
                 return self._dns_driver
             except ImportError:
-                LOG.exception(_LE("ImportError exception occurred while loading "
-                                  "the external DNS service driver"))
+                LOG.exception("ImportError exception occurred while loading "
+                              "the external DNS service driver")
                 raise dns.ExternalDNSDriverNotFound(
                     driver=cfg.CONF.external_dns_driver)

@ -210,10 +210,10 @@ class DNSDbMixin(object):
             self.dns_driver.delete_record_set(context, dns_domain, dns_name,
                                               records)
         except (dns.DNSDomainNotFound, dns.DuplicateRecordSet) as e:
-            LOG.exception(_LE("Error deleting Floating IP data from external "
+            LOG.exception("Error deleting Floating IP data from external "
                           "DNS service. Name: '%(name)s'. Domain: "
                           "'%(domain)s'. IP addresses '%(ips)s'. DNS "
-                          "service driver message '%(message)s'"),
+                          "service driver message '%(message)s'",
                           {"name": dns_name,
                            "domain": dns_domain,
                            "message": e.msg,

@ -241,10 +241,10 @@ class DNSDbMixin(object):
             self.dns_driver.create_record_set(context, dns_domain, dns_name,
                                               records)
         except (dns.DNSDomainNotFound, dns.DuplicateRecordSet) as e:
-            LOG.exception(_LE("Error publishing floating IP data in external "
+            LOG.exception("Error publishing floating IP data in external "
                           "DNS service. Name: '%(name)s'. Domain: "
                           "'%(domain)s'. DNS service driver message "
-                          "'%(message)s'"),
+                          "'%(message)s'",
                           {"name": dns_name,
                            "domain": dns_domain,
                            "message": e.msg})
@ -27,7 +27,7 @@ from oslo_log import helpers as log_helpers
 from oslo_log import log as logging
 from sqlalchemy import or_

-from neutron._i18n import _, _LE
+from neutron._i18n import _
 from neutron.common import utils
 from neutron.db import api as db_api
 from neutron.db import models_v2

@ -120,7 +120,7 @@ class DVRDbMixin(ext_dvr.DVRMacAddressPluginBase):
         try:
             return self._create_dvr_mac_address_retry(context, host, base_mac)
         except exceptions.NeutronDbObjectDuplicateEntry:
-            LOG.error(_LE("MAC generation error after %s attempts"),
+            LOG.error("MAC generation error after %s attempts",
                       db_api.MAX_RETRIES)
             raise ext_dvr.MacAddressGenerationFailure(host=host)

@ -200,8 +200,8 @@ class DVRDbMixin(ext_dvr.DVRMacAddressPluginBase):
         internal_gateway_ports = self.plugin.get_ports(
             context, filters=filter)
         if not internal_gateway_ports:
-            LOG.error(_LE("Could not retrieve gateway port "
-                          "for subnet %s"), subnet_info)
+            LOG.error("Could not retrieve gateway port "
+                      "for subnet %s", subnet_info)
             return {}
         internal_port = internal_gateway_ports[0]
         subnet_info['gateway_mac'] = internal_port['mac_address']
@ -28,7 +28,7 @@ from oslo_log import log as logging
 from sqlalchemy import and_, or_
 from sqlalchemy.orm import exc as orm_exc

-from neutron._i18n import _, _LI
+from neutron._i18n import _
 from neutron.common import constants
 from neutron.common import exceptions as n_exc
 from neutron.common import ipv6_utils

@ -72,8 +72,8 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
                 ip_range_pools.append(netaddr.IPRange(ip_pool['start'],
                                                       ip_pool['end']))
             except netaddr.AddrFormatError:
-                LOG.info(_LI("Found invalid IP address in pool: "
-                             "%(start)s - %(end)s:"),
+                LOG.info("Found invalid IP address in pool: "
+                         "%(start)s - %(end)s:",
                          {'start': ip_pool['start'],
                           'end': ip_pool['end']})
                 raise n_exc.InvalidAllocationPool(pool=ip_pool)

@ -241,14 +241,14 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
             if ((netaddr.IPSet([subnet.cidr]) & new_subnet_ipset) and
                     subnet.cidr != constants.PROVISIONAL_IPV6_PD_PREFIX):
                 # don't give out details of the overlapping subnet
-                err_msg = (_("Requested subnet with cidr: %(cidr)s for "
+                err_msg = ("Requested subnet with cidr: %(cidr)s for "
                            "network: %(network_id)s overlaps with another "
-                           "subnet") %
+                           "subnet" %
                            {'cidr': new_subnet_cidr,
                             'network_id': network.id})
-                LOG.info(_LI("Validation for CIDR: %(new_cidr)s failed - "
+                LOG.info("Validation for CIDR: %(new_cidr)s failed - "
                          "overlaps with subnet %(subnet_id)s "
-                         "(CIDR: %(cidr)s)"),
+                         "(CIDR: %(cidr)s)",
                          {'new_cidr': new_subnet_cidr,
                           'subnet_id': subnet.id,
                           'cidr': subnet.cidr})

@ -284,12 +284,12 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
             end_ip = netaddr.IPAddress(ip_pool.last, ip_pool.version)
             if (start_ip.version != subnet.version or
                     end_ip.version != subnet.version):
-                LOG.info(_LI("Specified IP addresses do not match "
-                             "the subnet IP version"))
+                LOG.info("Specified IP addresses do not match "
+                         "the subnet IP version")
                 raise n_exc.InvalidAllocationPool(pool=ip_pool)
             if start_ip < subnet_first_ip or end_ip > subnet_last_ip:
-                LOG.info(_LI("Found pool larger than subnet "
-                             "CIDR:%(start)s - %(end)s"),
+                LOG.info("Found pool larger than subnet "
+                         "CIDR:%(start)s - %(end)s",
                          {'start': start_ip, 'end': end_ip})
                 raise n_exc.OutOfBoundsAllocationPool(
                     pool=ip_pool,

@ -309,8 +309,8 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
             if ip_sets[l_cursor] & ip_sets[r_cursor]:
                 l_range = ip_ranges[l_cursor]
                 r_range = ip_ranges[r_cursor]
-                LOG.info(_LI("Found overlapping ranges: %(l_range)s and "
-                             "%(r_range)s"),
+                LOG.info("Found overlapping ranges: %(l_range)s and "
+                         "%(r_range)s",
                          {'l_range': l_range, 'r_range': r_range})
                 raise n_exc.OverlappingAllocationPools(
                     pool_1=l_range,
@ -24,7 +24,6 @@ from oslo_log import log as logging
 from oslo_utils import excutils
 from sqlalchemy import and_

-from neutron._i18n import _LE, _LW
 from neutron.common import constants as n_const
 from neutron.common import ipv6_utils
 from neutron.db import api as db_api

@ -55,7 +54,7 @@ class IpamPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
             try:
                 func(*args, **kwargs)
             except Exception as e:
-                LOG.warning(_LW("Revert failed with: %s"), e)
+                LOG.warning("Revert failed with: %s", e)

     def _ipam_deallocate_ips(self, context, ipam_driver, port, ips,
                              revert_on_fail=True):

@ -92,8 +91,8 @@ class IpamPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
             elif not revert_on_fail and ips:
                 addresses = ', '.join(self._get_failed_ips(ips,
                                                            deallocated))
-                LOG.error(_LE("IP deallocation failed on "
-                              "external system for %s"), addresses)
+                LOG.error("IP deallocation failed on "
+                          "external system for %s", addresses)
         return deallocated

     def _ipam_allocate_ips(self, context, ipam_driver, port, ips,

@ -146,8 +145,8 @@ class IpamPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
             elif not revert_on_fail and ips:
                 addresses = ', '.join(self._get_failed_ips(ips,
                                                            allocated))
-                LOG.error(_LE("IP allocation failed on "
-                              "external system for %s"), addresses)
+                LOG.error("IP allocation failed on "
+                          "external system for %s", addresses)

         return allocated
@ -22,7 +22,7 @@ from oslo_log import log as logging
 import oslo_messaging
 from sqlalchemy import or_

-from neutron._i18n import _, _LI
+from neutron._i18n import _
 from neutron.agent.common import utils as agent_utils
 from neutron.common import constants as l_consts
 from neutron.common import utils as n_utils

@ -66,8 +66,8 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,

     def add_periodic_l3_agent_status_check(self):
         if not cfg.CONF.allow_automatic_l3agent_failover:
-            LOG.info(_LI("Skipping period L3 agent status check because "
-                         "automatic router rescheduling is disabled."))
+            LOG.info("Skipping period L3 agent status check because "
+                     "automatic router rescheduling is disabled.")
             return

         self.add_agent_status_check_worker(

@ -322,8 +322,8 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
         agent = self._get_agent_by_type_and_host(
             context, constants.AGENT_TYPE_L3, host)
         if not agentschedulers_db.services_available(agent.admin_state_up):
-            LOG.info(_LI("Agent has its services disabled. Returning "
-                         "no active routers. Agent: %s"), agent)
+            LOG.info("Agent has its services disabled. Returning "
+                     "no active routers. Agent: %s", agent)
             return []
         scheduled_router_ids = self._get_router_ids_for_agent(
             context, agent, router_ids)
@ -35,7 +35,7 @@ import six
 from sqlalchemy import orm
 from sqlalchemy.orm import exc

-from neutron._i18n import _, _LE, _LI, _LW
+from neutron._i18n import _
 from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
 from neutron.common import constants as n_const
 from neutron.common import ipv6_utils

@ -145,21 +145,21 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
             try:
                 self._fix_or_kill_floating_port(context, port_id)
             except Exception:
-                LOG.exception(_LE("Error cleaning up floating IP port: %s"),
+                LOG.exception("Error cleaning up floating IP port: %s",
                               port_id)

     def _fix_or_kill_floating_port(self, context, port_id):
         fip = (context.session.query(l3_models.FloatingIP).
                filter_by(floating_port_id=port_id).first())
         if fip:
-            LOG.warning(_LW("Found incorrect device_id on floating port "
-                            "%(pid)s, correcting to %(fip)s."),
+            LOG.warning("Found incorrect device_id on floating port "
+                        "%(pid)s, correcting to %(fip)s.",
                         {'pid': port_id, 'fip': fip.id})
             self._core_plugin.update_port(
                 context, port_id, {'port': {'device_id': fip.id}})
         else:
-            LOG.warning(_LW("Found floating IP port %s without floating IP, "
-                            "deleting."), port_id)
+            LOG.warning("Found floating IP port %s without floating IP, "
+                        "deleting.", port_id)
             self._core_plugin.delete_port(
                 context, port_id, l3_port_check=False)

@ -1616,8 +1616,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
             if not fixed_ips:
                 # Skip ports without IPs, which can occur if a subnet
                 # attached to a router is deleted
-                LOG.info(_LI("Skipping port %s as no IP is configure on "
-                             "it"),
+                LOG.info("Skipping port %s as no IP is configure on "
+                         "it",
                          port['id'])
                 continue
             yield port
@ -29,7 +29,7 @@ from oslo_log import log as logging
 from oslo_utils import excutils
 import six

-from neutron._i18n import _, _LE, _LI, _LW
+from neutron._i18n import _
 from neutron.common import constants as l3_const
 from neutron.common import utils as n_utils
 from neutron.db import api as db_api

@ -227,8 +227,8 @@ class DVRResourceOperationHandler(object):
                 port_type=const.DEVICE_OWNER_DVR_INTERFACE
             )
         )
-        LOG.info(_LI('SNAT interface port list does not exist,'
-                     ' so create one: %s'), port_list)
+        LOG.info('SNAT interface port list does not exist,'
+                 ' so create one: %s', port_list)
         for intf in int_ports:
             if intf.fixed_ips:
                 # Passing the subnet for the port to make sure the IP's

@ -431,8 +431,8 @@ class DVRResourceOperationHandler(object):
                 try:
                     revert()
                 except Exception:
-                    LOG.exception(_LE("Failed to revert change "
-                                      "to router port %s."),
+                    LOG.exception("Failed to revert change "
+                                  "to router port %s.",
                                   port['id'])
         LOG.debug("CSNAT port updated for IPv6 subnet: %s", updated_port)

@ -544,7 +544,7 @@ class DVRResourceOperationHandler(object):
         for p in c_snat_ports:
             if subnet_id is None or not p['fixed_ips']:
                 if not p['fixed_ips']:
-                    LOG.info(_LI("CSNAT port has no IPs: %s"), p)
+                    LOG.info("CSNAT port has no IPs: %s", p)
                 self.l3plugin._core_plugin.delete_port(context,
                                                        p['id'],
                                                        l3_port_check=False)

@ -847,8 +847,8 @@ class _DVRAgentInterfaceMixin(object):
         f_port = self._get_agent_gw_ports_exist_for_network(
             context, network_id, host, l3_agent_db['id'])
         if not f_port:
-            LOG.info(_LI('Agent Gateway port does not exist,'
-                         ' so create one: %s'), f_port)
+            LOG.info('Agent Gateway port does not exist,'
+                     ' so create one: %s', f_port)
             port_data = {'tenant_id': '',
                          'network_id': network_id,
                          'device_id': l3_agent_db['id'],

@ -1010,8 +1010,8 @@ class L3_NAT_with_dvr_db_mixin(_DVRAgentInterfaceMixin,
             # using admin context as router may belong to admin tenant
             router = self._get_router(context.elevated(), router_id)
         except l3.RouterNotFound:
-            LOG.warning(_LW("Router %s was not found. "
-                            "Skipping agent notification."),
+            LOG.warning("Router %s was not found. "
+                        "Skipping agent notification.",
                         router_id)
             return
@ -35,7 +35,7 @@ import sqlalchemy as sa
 from sqlalchemy import exc as sql_exc
 from sqlalchemy import orm

-from neutron._i18n import _, _LE, _LI, _LW
+from neutron._i18n import _
 from neutron.common import constants as n_const
 from neutron.common import utils as n_utils
 from neutron.db import _utils as db_utils

@ -166,8 +166,8 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
                 return allocation.vr_id

             except db_exc.DBDuplicateEntry:
-                LOG.info(_LI("Attempt %(count)s to allocate a VRID in the "
-                             "network %(network)s for the router %(router)s"),
+                LOG.info("Attempt %(count)s to allocate a VRID in the "
+                         "network %(network)s for the router %(router)s",
                          {'count': count, 'network': network_id,
                           'router': router_id})

@ -255,9 +255,9 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
         max_agents = cfg.CONF.max_l3_agents_per_router
         if max_agents:
             if max_agents > num_agents:
-                LOG.info(_LI("Number of active agents lower than "
+                LOG.info("Number of active agents lower than "
                          "max_l3_agents_per_router. L3 agents "
-                         "available: %s"), num_agents)
+                         "available: %s", num_agents)
             else:
                 num_agents = max_agents

@ -414,9 +414,9 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
             with excutils.save_and_reraise_exception() as ctx:
                 if isinstance(e, l3_ha.NoVRIDAvailable):
                     ctx.reraise = False
-                    LOG.warning(_LW("No more VRIDs for router: %s"), e)
+                    LOG.warning("No more VRIDs for router: %s", e)
                 else:
-                    LOG.exception(_LE("Failed to schedule HA router %s."),
+                    LOG.exception("Failed to schedule HA router %s.",
                                   router_id)
             router['status'] = self._update_router_db(
                 context, router_id,

@ -502,15 +502,15 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
             LOG.debug(
                 "HA network for tenant %s was already deleted.", tenant_id)
         except sa.exc.InvalidRequestError:
-            LOG.info(_LI("HA network %s can not be deleted."), net_id)
+            LOG.info("HA network %s can not be deleted.", net_id)
         except n_exc.NetworkInUse:
             # network is still in use, this is normal so we don't
             # log anything
             pass
         else:
-            LOG.info(_LI("HA network %(network)s was deleted as "
+            LOG.info("HA network %(network)s was deleted as "
                      "no HA routers are present in tenant "
-                     "%(tenant)s."),
+                     "%(tenant)s.",
                      {'network': net_id, 'tenant': tenant_id})

     @registry.receives(resources.ROUTER, [events.PRECOMMIT_DELETE])

@ -622,8 +622,8 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
             port = binding.port
             if not port:
                 # Filter the HA router has no ha port here
-                LOG.info(_LI("HA router %s is missing HA router port "
-                             "bindings. Skipping it."),
+                LOG.info("HA router %s is missing HA router port "
+                         "bindings. Skipping it.",
                          binding.router_id)
                 routers_dict.pop(binding.router_id)
                 continue
@ -18,7 +18,6 @@ from neutron_lib.plugins import directory
 from oslo_log import log as logging
 import oslo_messaging

-from neutron._i18n import _LE
 from neutron.common import utils

 LOG = logging.getLogger(__name__)

@ -44,7 +43,7 @@ class MeteringRpcCallbacks(object):
         else:
             agents = l3_plugin.get_l3_agents(context, filters={'host': [host]})
             if not agents:
-                LOG.error(_LE('Unable to find agent %s.'), host)
+                LOG.error('Unable to find agent %s.', host)
                 return

             routers = l3_plugin.list_routers_on_l3_agent(context, agents[0].id)
@ -17,7 +17,6 @@ from neutron_lib.callbacks import registry
 from neutron_lib.callbacks import resources
 from oslo_log import log as logging

-from neutron._i18n import _LE
 from neutron.db import api as db_api
 from neutron.db import models_v2
 from neutron.objects import provisioning_blocks as pb_obj

@ -122,7 +121,7 @@ def provisioning_complete(context, object_id, object_type, entity):
     # this can't be called in a transaction to avoid REPEATABLE READ
     # tricking us into thinking there are remaining provisioning components
     if context.session.is_active:
-        raise RuntimeError(_LE("Must not be called in a transaction"))
+        raise RuntimeError("Must not be called in a transaction")
     standard_attr_id = _get_standard_attr_id(context, object_id,
                                              object_type)
     if not standard_attr_id:

@ -162,10 +161,10 @@ def is_object_blocked(context, object_id, object_type):
 def _get_standard_attr_id(context, object_id, object_type):
     model = _RESOURCE_TO_MODEL_MAP.get(object_type)
     if not model:
-        raise RuntimeError(_LE("Could not find model for %s. If you are "
+        raise RuntimeError("Could not find model for %s. If you are "
                            "adding provisioning blocks for a new resource "
                            "you must call add_model_for_resource during "
-                           "initialization for your type.") % object_type)
+                           "initialization for your type." % object_type)
     obj = (context.session.query(model).enable_eagerloads(False).
            filter_by(id=object_id).first())
     if not obj:
@ -16,7 +16,6 @@ from neutron_lib.callbacks import resources
 from oslo_log import log as logging
 from oslo_utils import uuidutils

-from neutron._i18n import _LI
 from neutron.db import api as db_api
 from neutron.db.models import segment as segments_model
 from neutron.objects import base as base_obj

@ -55,8 +54,8 @@ def add_network_segment(context, network_id, segment, segment_index=0,
                         context=context,
                         segment=netseg_obj)
     segment['id'] = netseg_obj.id
-    LOG.info(_LI("Added segment %(id)s of type %(network_type)s for network "
-                 "%(network_id)s"),
+    LOG.info("Added segment %(id)s of type %(network_type)s for network "
+             "%(network_id)s",
             {'id': netseg_obj.id,
              'network_type': netseg_obj.network_type,
              'network_id': netseg_obj.network_id})
@ -21,7 +21,7 @@ from sqlalchemy.ext.associationproxy import association_proxy
 from sqlalchemy.ext import declarative
 from sqlalchemy.orm import session as se

-from neutron._i18n import _, _LE
+from neutron._i18n import _
 from neutron.db import sqlalchemytypes


@ -178,9 +178,9 @@ def get_standard_attr_resource_model_map():
     for subclass in HasStandardAttributes.__subclasses__():
         for resource in subclass.get_api_collections():
             if resource in rs_map:
-                raise RuntimeError(_LE("Model %(sub)s tried to register for "
+                raise RuntimeError("Model %(sub)s tried to register for "
                                    "API resource %(res)s which conflicts "
-                                   "with model %(other)s.") %
+                                   "with model %(other)s." %
                                    dict(sub=subclass, other=rs_map[resource],
                                         res=resource))
             rs_map[resource] = subclass

@ -206,8 +206,8 @@ def get_tag_resource_parent_map():
 @event.listens_for(se.Session, 'after_bulk_delete')
 def throw_exception_on_bulk_delete_of_listened_for_objects(delete_context):
     if hasattr(delete_context.mapper.class_, 'revises_on_change'):
-        raise RuntimeError(_LE("%s may not be deleted in bulk because it "
+        raise RuntimeError("%s may not be deleted in bulk because it "
                            "bumps the revision of other resources via "
                            "SQLAlchemy event handlers, which are not "
-                           "compatible with bulk deletes.") %
+                           "compatible with bulk deletes." %
                            delete_context.mapper.class_)
@ -18,7 +18,7 @@ from neutronclient.common import utils
 from neutronclient.neutron import v2_0 as client
 from neutronclient.neutron.v2_0 import port

-from neutron._i18n import _, _LI
+from neutron._i18n import _


 class ProbeCommand(client.NeutronCommand):

@ -84,7 +84,7 @@ class ClearProbe(ProbeCommand):
     def take_action(self, parsed_args):
         debug_agent = self.get_debug_agent()
         cleared_probes_count = debug_agent.clear_probes()
-        self.log.info(_LI('%d probe(s) deleted'), cleared_probes_count)
+        self.log.info('%d probe(s) deleted', cleared_probes_count)


 class ExecProbe(ProbeCommand):
@ -21,7 +21,6 @@ from neutron_lib.api.definitions import portbindings
 from neutron_lib import constants
 from oslo_log import log as logging

-from neutron._i18n import _LW
 from neutron.agent.linux import dhcp
 from neutron.agent.linux import ip_lib


@ -108,7 +107,7 @@ class NeutronDebugAgent(object):
             try:
                 ip.netns.delete(namespace)
             except Exception:
-                LOG.warning(_LW('Failed to delete namespace %s'), namespace)
+                LOG.warning('Failed to delete namespace %s', namespace)
         else:
             self.driver.unplug(self.driver.get_device_name(port),
                                bridge=bridge)
@ -19,7 +19,6 @@ from oslo_config import cfg
 from oslo_utils import importutils

 from neutron._i18n import _
-from neutron._i18n import _LW
 from neutron.agent.common import utils
 from neutron.agent.linux import interface
 from neutron.conf.agent import common as config

@ -81,9 +80,9 @@ class NeutronDebugShell(shell.NeutronShell):
         self.debug_agent = debug_agent.NeutronDebugAgent(cfg.CONF,
                                                          client,
                                                          driver)
-        self.log.warning(_LW('This tool is deprecated and will be removed '
+        self.log.warning('This tool is deprecated and will be removed '
                          'in the future to be replaced with a more '
-                         'powerful troubleshooting toolkit.'))
+                         'powerful troubleshooting toolkit.')


 def main(argv=None):
@ -24,7 +24,7 @@ from oslo_log import log as logging
 import six
 import webob.exc

-from neutron._i18n import _, _LE
+from neutron._i18n import _
 from neutron.api import extensions
 from neutron.api.v2 import base
 from neutron.api.v2 import resource
@ -47,8 +47,8 @@ class RouterSchedulerController(wsgi.Controller):
     def get_plugin(self):
         plugin = directory.get_plugin(plugin_constants.L3)
         if not plugin:
-            LOG.error(_LE('No plugin for L3 routing registered to handle '
-                          'router scheduling'))
+            LOG.error('No plugin for L3 routing registered to handle '
+                      'router scheduling')
             msg = _('The resource could not be found.')
             raise webob.exc.HTTPNotFound(msg)
         return plugin
@ -89,8 +89,8 @@ class L3AgentsHostingRouterController(wsgi.Controller):
     def get_plugin(self):
         plugin = directory.get_plugin(plugin_constants.L3)
         if not plugin:
-            LOG.error(_LE('No plugin for L3 routing registered to handle '
-                          'router scheduling'))
+            LOG.error('No plugin for L3 routing registered to handle '
+                      'router scheduling')
             msg = _('The resource could not be found.')
             raise webob.exc.HTTPNotFound(msg)
         return plugin
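Note the split visible in both get_plugin() hunks: only the operator-facing log call loses its marker, while the message raised back to the API client keeps _(), because user-facing strings stay translatable. A condensed sketch of that rule (the directory lookup is stubbed to keep the snippet standalone):

    import webob.exc
    from oslo_log import log as logging

    from neutron._i18n import _

    LOG = logging.getLogger(__name__)


    def get_l3_plugin(directory, l3_service_type):
        plugin = directory.get_plugin(l3_service_type)
        if not plugin:
            # Operator-facing log line: plain, untranslated string.
            LOG.error('No plugin for L3 routing registered to handle '
                      'router scheduling')
            # User-facing API error: still wrapped in _() for translation.
            raise webob.exc.HTTPNotFound(_('The resource could not be found.'))
        return plugin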
@ -20,7 +20,7 @@ from neutron_lib import exceptions
 from oslo_config import cfg
 from oslo_log import log as logging

-from neutron._i18n import _, _LI
+from neutron._i18n import _

 LOG = logging.getLogger(__name__)

@ -45,7 +45,7 @@ def disable_extension_by_config(aliases):
     if not cfg.CONF.vlan_transparent:
         if 'vlan-transparent' in aliases:
             aliases.remove('vlan-transparent')
-            LOG.info(_LI('Disabled vlantransparent extension.'))
+            LOG.info('Disabled vlantransparent extension.')


 def get_vlan_transparent(network):
@ -23,7 +23,7 @@ from oslo_db import exception as db_exc
 from oslo_log import log
 from oslo_utils import uuidutils

-from neutron._i18n import _, _LE
+from neutron._i18n import _
 from neutron.ipam import driver as ipam_base
 from neutron.ipam.drivers.neutrondb_ipam import db_api as ipam_db_api
 from neutron.ipam import exceptions as ipam_exc
@ -90,9 +90,8 @@ class NeutronDbSubnet(ipam_base.Subnet):
         ipam_subnet = ipam_db_api.IpamSubnetManager.load_by_neutron_subnet_id(
             ctx, neutron_subnet_id)
         if not ipam_subnet:
-            LOG.error(_LE("IPAM subnet referenced to "
-                          "Neutron subnet %s does not exist"),
-                      neutron_subnet_id)
+            LOG.error("IPAM subnet referenced to "
+                      "Neutron subnet %s does not exist", neutron_subnet_id)
             raise n_exc.SubnetNotFound(subnet_id=neutron_subnet_id)
         pools = []
         for pool in ipam_subnet.allocation_pools:
@ -316,9 +315,8 @@ class NeutronDbPool(subnet_alloc.SubnetAllocator):
         count = ipam_db_api.IpamSubnetManager.delete(self._context,
                                                      subnet_id)
         if count < 1:
-            LOG.error(_LE("IPAM subnet referenced to "
-                          "Neutron subnet %s does not exist"),
-                      subnet_id)
+            LOG.error("IPAM subnet referenced to "
+                      "Neutron subnet %s does not exist", subnet_id)
             raise n_exc.SubnetNotFound(subnet_id=subnet_id)

     def needs_rollback(self):
@ -25,7 +25,7 @@ from oslo_utils import excutils
 from osprofiler import profiler
 import six

-from neutron._i18n import _, _LE, _LI
+from neutron._i18n import _
 from neutron.common import utils
 from neutron.plugins.common import constants

@ -126,7 +126,7 @@ class NeutronManager(object):
         # intentionally to allow v2 plugins to be monitored
         # for performance metrics.
         plugin_provider = cfg.CONF.core_plugin
-        LOG.info(_LI("Loading core plugin: %s"), plugin_provider)
+        LOG.info("Loading core plugin: %s", plugin_provider)
         # NOTE(armax): keep hold of the actual plugin object
         plugin = self._get_plugin_instance(CORE_PLUGINS_NAMESPACE,
                                            plugin_provider)
@ -159,7 +159,7 @@ class NeutronManager(object):
                                                     plugin_provider)
         except ImportError:
             with excutils.save_and_reraise_exception():
-                LOG.error(_LE("Plugin '%s' not found."), plugin_provider)
+                LOG.error("Plugin '%s' not found.", plugin_provider)

     def _get_plugin_instance(self, namespace, plugin_provider):
         plugin_class = self.load_class_for_provider(namespace, plugin_provider)
@ -174,7 +174,7 @@ class NeutronManager(object):
             if ext_alias in constants.EXT_TO_SERVICE_MAPPING:
                 service_type = constants.EXT_TO_SERVICE_MAPPING[ext_alias]
                 directory.add_plugin(service_type, plugin)
-                LOG.info(_LI("Service %s is supported by the core plugin"),
+                LOG.info("Service %s is supported by the core plugin",
                          service_type)

     def _get_default_service_plugins(self):
@ -194,7 +194,7 @@ class NeutronManager(object):
             if provider == '':
                 continue

-            LOG.info(_LI("Loading Plugin: %s"), provider)
+            LOG.info("Loading Plugin: %s", provider)
             plugin_inst = self._get_plugin_instance('neutron.service_plugins',
                                                     provider)
@ -29,7 +29,6 @@ from oslo_log import log as logging
 from oslo_utils import uuidutils
 from sqlalchemy.orm import attributes as sql_attr

-from neutron._i18n import _LE, _LI, _LW
 from neutron.notifiers import batch_notifier


@ -162,8 +161,8 @@ class Notifier(object):

     def _can_notify(self, port):
         if not port.id:
-            LOG.warning(_LW("Port ID not set! Nova will not be notified of "
-                            "port status change."))
+            LOG.warning("Port ID not set! Nova will not be notified of "
+                        "port status change.")
             return False

         # If there is no device_id set there is nothing we can do here.
@ -248,11 +247,11 @@ class Notifier(object):
             LOG.debug("Nova returned NotFound for event: %s",
                       batched_events)
         except Exception:
-            LOG.exception(_LE("Failed to notify nova on events: %s"),
+            LOG.exception("Failed to notify nova on events: %s",
                           batched_events)
         else:
             if not isinstance(response, list):
-                LOG.error(_LE("Error response returned from nova: %s"),
+                LOG.error("Error response returned from nova: %s",
                           response)
                 return
             response_error = False
@ -263,10 +262,10 @@ class Notifier(object):
                     response_error = True
                     continue
                 if code != 200:
-                    LOG.warning(_LW("Nova event: %s returned with failed "
-                                    "status"), event)
+                    LOG.warning("Nova event: %s returned with failed "
+                                "status", event)
                 else:
-                    LOG.info(_LI("Nova event response: %s"), event)
+                    LOG.info("Nova event response: %s", event)
             if response_error:
-                LOG.error(_LE("Error response returned from nova: %s"),
+                LOG.error("Error response returned from nova: %s",
                           response)
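The notifier hunks also show that LOG.exception() needs nothing beyond the message change: it is LOG.error() plus the active traceback, so dropping _LE does not affect traceback capture. A small sketch, assuming oslo.log and a novaclient-style handle (nclient and batched_events are stand-in names):

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)


    def send_events(nclient, batched_events):
        try:
            return nclient.server_external_events.create(batched_events)
        except Exception:
            # exc_info is implied by LOG.exception(); the message is now a
            # plain literal, with '%s' interpolated lazily by the logger.
            LOG.exception("Failed to notify nova on events: %s",
                          batched_events)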
@ -16,7 +16,6 @@ from oslo_log import log as logging
 import pecan
 from pecan import request

-from neutron._i18n import _LW
 from neutron import manager
 from neutron.pecan_wsgi.controllers import utils

@ -85,8 +84,8 @@ class ItemController(utils.NeutronPecanController):
                 collection_path)
             if not controller:
                 if collection not in self._member_actions:
-                    LOG.warning(_LW("No controller found for: %s - returning"
-                                    "response code 404"), collection)
+                    LOG.warning("No controller found for: %s - returning"
+                                "response code 404", collection)
                     pecan.abort(404)
                 # collection is a member action, so we create a new controller
                 # for it.
@ -20,7 +20,6 @@ import pecan
 from pecan import request
 import six.moves.urllib.parse as urlparse

-from neutron._i18n import _LW
 from neutron.api.v2 import attributes
 from neutron.api.views import versions as versions_view
 from neutron import manager
@ -120,8 +119,8 @@ class V2Controller(object):
         controller = manager.NeutronManager.get_controller_for_resource(
             collection)
         if not controller:
-            LOG.warning(_LW("No controller found for: %s - returning response "
-                            "code 404"), collection)
+            LOG.warning("No controller found for: %s - returning response "
+                        "code 404", collection)
             pecan.abort(404)
         # Store resource and collection names in pecan request context so that
         # hooks can leverage them if necessary. The following code uses
@ -17,7 +17,6 @@ import oslo_i18n
 from oslo_log import log as logging
 from pecan import hooks

-from neutron._i18n import _LE, _LI
 from neutron.api import api_common
 from neutron.api.v2 import base as v2base

@ -34,8 +33,8 @@ class ExceptionTranslationHook(hooks.PecanHook):
         exc = api_common.convert_exception_to_http_exc(e, v2base.FAULT_MAP,
                                                        language)
         if hasattr(exc, 'code') and 400 <= exc.code < 500:
-            LOG.info(_LI('%(action)s failed (client error): %(exc)s'),
+            LOG.info('%(action)s failed (client error): %(exc)s',
                      {'action': state.request.method, 'exc': exc})
         else:
-            LOG.exception(_LE('%s failed.'), state.request.method)
+            LOG.exception('%s failed.', state.request.method)
         return exc
@ -32,7 +32,7 @@ from oslo_utils import encodeutils
 from oslo_utils import excutils
 import webob.exc

-from neutron._i18n import _, _LE, _LI
+from neutron._i18n import _
 from neutron.api.v2 import attributes
 from neutron.common import exceptions as n_exc
 from neutron.plugins.common import constants as p_const
@ -201,7 +201,7 @@ def delete_port_on_error(core_plugin, context, port_id):
             except exceptions.PortNotFound:
                 LOG.debug("Port %s not found", port_id)
             except Exception:
-                LOG.exception(_LE("Failed to delete port: %s"), port_id)
+                LOG.exception("Failed to delete port: %s", port_id)


 @contextlib.contextmanager
@ -214,7 +214,7 @@ def update_port_on_error(core_plugin, context, port_id, revert_value):
                 core_plugin.update_port(context, port_id,
                                         {'port': revert_value})
             except Exception:
-                LOG.exception(_LE("Failed to update port: %s"), port_id)
+                LOG.exception("Failed to update port: %s", port_id)


 def get_interface_name(name, prefix='', max_len=n_const.DEVICE_NAME_MAX_LEN):
@ -240,9 +240,9 @@ def get_interface_name(name, prefix='', max_len=n_const.DEVICE_NAME_MAX_LEN):
     new_name = ('%(prefix)s%(truncated)s%(hash)s' %
                 {'prefix': prefix, 'truncated': name[0:namelen],
                  'hash': hashed_name.hexdigest()[0:INTERFACE_HASH_LEN]})
-    LOG.info(_LI("The requested interface name %(requested_name)s exceeds the "
-                 "%(limit)d character limitation. It was shortened to "
-                 "%(new_name)s to fit."),
+    LOG.info("The requested interface name %(requested_name)s exceeds the "
+             "%(limit)d character limitation. It was shortened to "
+             "%(new_name)s to fit.",
             {'requested_name': requested_name,
              'limit': max_len, 'new_name': new_name})
     return new_name
@ -27,7 +27,7 @@ import six
 from sqlalchemy import or_
 from sqlalchemy.orm import exc

-from neutron._i18n import _, _LE
+from neutron._i18n import _
 from neutron.db import api as db_api
 from neutron.db.models import securitygroup as sg_models
 from neutron.db import models_v2
@ -166,7 +166,7 @@ def get_port(context, port_id):
     except exc.NoResultFound:
         return
     except exc.MultipleResultsFound:
-        LOG.error(_LE("Multiple ports have port_id starting with %s"),
+        LOG.error("Multiple ports have port_id starting with %s",
                   port_id)
         return

@ -251,7 +251,7 @@ def get_port_binding_host(context, port_id):
                   {'port_id': port_id})
         return
     except exc.MultipleResultsFound:
-        LOG.error(_LE("Multiple ports have port_id starting with %s"),
+        LOG.error("Multiple ports have port_id starting with %s",
                   port_id)
         return
     return query.host
@ -312,7 +312,7 @@ def partial_port_ids_to_full_ids(context, partial_ids):
         if len(matching) < 1:
             LOG.info("No ports have port_id starting with %s", partial_id)
         elif len(matching) > 1:
-            LOG.error(_LE("Multiple ports have port_id starting with %s"),
+            LOG.error("Multiple ports have port_id starting with %s",
                       partial_id)
     return result
@ -20,7 +20,6 @@ from oslo_log import log
 from oslo_serialization import jsonutils
 import sqlalchemy

-from neutron._i18n import _LW
 from neutron.db import segments_db
 from neutron.plugins.ml2 import driver_api as api

@ -245,7 +244,7 @@ class PortContext(MechanismDriverContext, api.PortContext):
         segment = segments_db.get_segment_by_id(self._plugin_context,
                                                 segment_id)
         if not segment:
-            LOG.warning(_LW("Could not expand segment %s"), segment_id)
+            LOG.warning("Could not expand segment %s", segment_id)
         return segment

     @property
@ -31,7 +31,6 @@ from oslo_service import service
 from oslo_utils import excutils
 from osprofiler import profiler

-from neutron._i18n import _LE, _LI
 from neutron.agent.l2 import l2_agent_extensions_manager as ext_manager
 from neutron.agent import rpc as agent_rpc
 from neutron.agent import securitygroups_rpc as agent_sg_rpc
@ -72,9 +71,9 @@ class CommonAgentLoop(service.Service):
     def _validate_manager_class(self):
         if not isinstance(self.mgr,
                           amb.CommonAgentManagerBase):
-            LOG.error(_LE("Manager class must inherit from "
-                          "CommonAgentManagerBase to ensure CommonAgent "
-                          "works properly."))
+            LOG.error("Manager class must inherit from "
+                      "CommonAgentManagerBase to ensure CommonAgent "
+                      "works properly.")
             sys.exit(1)

     def start(self):
@ -112,7 +111,7 @@ class CommonAgentLoop(service.Service):
         self.daemon_loop()

     def stop(self, graceful=True):
-        LOG.info(_LI("Stopping %s agent."), self.agent_type)
+        LOG.info("Stopping %s agent.", self.agent_type)
         if graceful and self.quitting_rpc_timeout:
             self.set_rpc_timeout(self.quitting_rpc_timeout)
         super(CommonAgentLoop, self).stop(graceful)
@ -128,22 +127,22 @@ class CommonAgentLoop(service.Service):
                                                     self.agent_state,
                                                     True)
             if agent_status == n_const.AGENT_REVIVED:
-                LOG.info(_LI('%s Agent has just been revived. '
-                             'Doing a full sync.'),
+                LOG.info('%s Agent has just been revived. '
+                         'Doing a full sync.',
                          self.agent_type)
                 self.fullsync = True
             # we only want to update resource versions on startup
             self.agent_state.pop('resource_versions', None)
             self.agent_state.pop('start_flag', None)
         except Exception:
-            LOG.exception(_LE("Failed reporting state!"))
+            LOG.exception("Failed reporting state!")

     def _validate_rpc_endpoints(self):
         if not isinstance(self.endpoints[0],
                           amb.CommonAgentManagerRpcCallBackBase):
-            LOG.error(_LE("RPC Callback class must inherit from "
-                          "CommonAgentManagerRpcCallBackBase to ensure "
-                          "CommonAgent works properly."))
+            LOG.error("RPC Callback class must inherit from "
+                      "CommonAgentManagerRpcCallBackBase to ensure "
+                      "CommonAgent works properly.")
             sys.exit(1)

     def setup_rpc(self):
@ -153,7 +152,7 @@ class CommonAgentLoop(service.Service):
             self.context, self.sg_plugin_rpc, defer_refresh_firewall=True)

         self.agent_id = self.mgr.get_agent_id()
-        LOG.info(_LI("RPC agent_id: %s"), self.agent_id)
+        LOG.info("RPC agent_id: %s", self.agent_id)

         self.topic = topics.AGENT
         self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
@ -217,7 +216,7 @@ class CommonAgentLoop(service.Service):
             devices_details_list = self.plugin_rpc.get_devices_details_list(
                 self.context, devices, self.agent_id, host=cfg.CONF.host)
         except Exception:
-            LOG.exception(_LE("Unable to get port details for %s"), devices)
+            LOG.exception("Unable to get port details for %s", devices)
             # resync is needed
             return True

@ -234,7 +233,7 @@ class CommonAgentLoop(service.Service):
             LOG.debug("Port %s added", device)

             if 'port_id' in device_details:
-                LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
+                LOG.info("Port %(device)s updated. Details: %(details)s",
                          {'device': device, 'details': device_details})
                 self.mgr.setup_arp_spoofing_protection(device,
                                                        device_details)
@ -312,7 +311,7 @@ class CommonAgentLoop(service.Service):
                                     context=self.context,
                                     device_details=device_details)
             else:
-                LOG.info(_LI("Device %s not defined on plugin"), device)
+                LOG.info("Device %s not defined on plugin", device)

     @contextlib.contextmanager
     def _ignore_missing_device_exceptions(self, device):
@ -328,7 +327,7 @@ class CommonAgentLoop(service.Service):
         resync = False
         self.sg_agent.remove_devices_filter(devices)
         for device in devices:
-            LOG.info(_LI("Attachment %s removed"), device)
+            LOG.info("Attachment %s removed", device)
             details = None
             try:
                 details = self.plugin_rpc.update_device_down(self.context,
@ -336,11 +335,11 @@ class CommonAgentLoop(service.Service):
                                                              self.agent_id,
                                                              cfg.CONF.host)
             except Exception:
-                LOG.exception(_LE("Error occurred while removing port %s"),
+                LOG.exception("Error occurred while removing port %s",
                               device)
                 resync = True
             if details and details['exists']:
-                LOG.info(_LI("Port %s updated."), device)
+                LOG.info("Port %s updated.", device)
             else:
                 LOG.debug("Device %s not defined on plugin", device)
             port_id = self._clean_network_ports(device)
@ -429,7 +428,7 @@ class CommonAgentLoop(service.Service):
                 or device_info.get('removed'))

     def daemon_loop(self):
-        LOG.info(_LI("%s Agent RPC Daemon Started!"), self.agent_type)
+        LOG.info("%s Agent RPC Daemon Started!", self.agent_type)
         device_info = None
         sync = True

@ -441,7 +440,7 @@ class CommonAgentLoop(service.Service):
                 self.fullsync = False

             if sync:
-                LOG.info(_LI("%s Agent out of sync with plugin!"),
+                LOG.info("%s Agent out of sync with plugin!",
                          self.agent_type)

             device_info = self.scan_devices(previous=device_info, sync=sync)
@ -453,7 +452,7 @@ class CommonAgentLoop(service.Service):
             try:
                 sync = self.process_network_devices(device_info)
             except Exception:
-                LOG.exception(_LE("Error in agent loop. Devices info: %s"),
+                LOG.exception("Error in agent loop. Devices info: %s",
                               device_info)
                 sync = True
@ -21,7 +21,6 @@ from oslo_config import cfg
 from oslo_db import exception as db_exc
 from oslo_log import log

-from neutron._i18n import _LE
 from neutron.common import exceptions as exc
 from neutron.db import api as db_api
 from neutron.objects import base as base_obj
@ -43,7 +42,7 @@ class BaseTypeDriver(api.ML2TypeDriver):
                 cfg.CONF.ml2.physical_network_mtus, unique_values=False
             )
         except Exception as e:
-            LOG.error(_LE("Failed to parse physical_network_mtus: %s"), e)
+            LOG.error("Failed to parse physical_network_mtus: %s", e)
             self.physnet_mtus = []

     def get_mtu(self, physical_network=None):
@ -22,7 +22,7 @@ from neutron_lib.plugins.ml2 import api
 from oslo_config import cfg
 from oslo_log import log as logging

-from neutron._i18n import _, _LW
+from neutron._i18n import _
 from neutron.conf.plugins.ml2.drivers import l2pop as config
 from neutron.db import api as db_api
 from neutron.db import l3_hamode_db
@ -264,7 +264,7 @@ class L2populationMechanismDriver(api.MechanismDriver):
         port_context = context._plugin_context
         agent = l2pop_db.get_agent_by_host(session, agent_host)
         if not agent:
-            LOG.warning(_LW("Unable to retrieve active L2 agent on host %s"),
+            LOG.warning("Unable to retrieve active L2 agent on host %s",
                         agent_host)
             return

@ -319,7 +319,7 @@ class L2populationMechanismDriver(api.MechanismDriver):
         agent = l2pop_db.get_agent_by_host(session,
                                            agent_host)
         if not agent:
-            LOG.warning(_LW("Unable to retrieve active L2 agent on host %s"),
+            LOG.warning("Unable to retrieve active L2 agent on host %s",
                         agent_host)
             return
         if not self._validate_segment(segment, port['id'], agent):
@ -19,7 +19,6 @@ from oslo_concurrency import lockutils
 from oslo_log import log as logging
 import tenacity

-from neutron._i18n import _LI
 from neutron.agent.linux import ip_lib

 LOG = logging.getLogger(__name__)
@ -31,8 +30,8 @@ def setup_arp_spoofing_protection(vif, port_details):
     if not port_details.get('port_security_enabled', True):
         # clear any previous entries related to this port
         delete_arp_spoofing_protection([vif])
-        LOG.info(_LI("Skipping ARP spoofing rules for port '%s' because "
-                     "it has port security disabled"), vif)
+        LOG.info("Skipping ARP spoofing rules for port '%s' because "
+                 "it has port security disabled", vif)
         return
     if net.is_port_trusted(port_details):
         # clear any previous entries related to this port
@ -100,7 +99,7 @@ def delete_unreferenced_arp_protection(current_vifs):
         devname = line.split(SPOOF_CHAIN_PREFIX, 1)[1].split(',')[0]
         if devname not in current_vifs:
             to_delete.append(devname)
-    LOG.info(_LI("Clearing orphaned ARP spoofing entries for devices %s"),
+    LOG.info("Clearing orphaned ARP spoofing entries for devices %s",
             to_delete)
    _delete_arp_spoofing_protection(to_delete, current_rules)
@ -16,7 +16,6 @@
 from neutron_lib import constants as n_const
 from oslo_log import log

-from neutron._i18n import _LW
 from neutron.plugins.ml2.drivers.linuxbridge.agent.common import constants

 LOG = log.getLogger(__name__)
@ -25,8 +24,8 @@ LOG = log.getLogger(__name__)
 def get_tap_device_name(interface_id):
     """Convert port ID into device name format expected by linux bridge."""
     if not interface_id:
-        LOG.warning(_LW("Invalid Interface ID, will lead to incorrect "
-                        "tap device name"))
+        LOG.warning("Invalid Interface ID, will lead to incorrect "
+                    "tap device name")
     tap_device_name = (n_const.TAP_DEVICE_PREFIX +
                        interface_id[:constants.RESOURCE_ID_LENGTH])
     return tap_device_name
@ -16,7 +16,6 @@ from oslo_config import cfg
 from oslo_log import helpers as log_helpers
 from oslo_log import log

-from neutron._i18n import _LI
 from neutron.agent.l2.extensions import qos_linux as qos
 from neutron.agent.linux import iptables_manager
 from neutron.agent.linux import tc_lib
@ -41,7 +40,7 @@ class QosLinuxbridgeAgentDriver(qos.QosLinuxAgentDriver):
                            const.EGRESS_DIRECTION: "o"}

     def initialize(self):
-        LOG.info(_LI("Initializing Linux bridge QoS extension"))
+        LOG.info("Initializing Linux bridge QoS extension")
         self.iptables_manager = iptables_manager.IptablesManager(use_ipv6=True)
         self.tbf_latency = cfg.CONF.QOS.tbf_latency
@ -31,7 +31,6 @@ from oslo_service import service
 from oslo_utils import excutils
 from six import moves

-from neutron._i18n import _LE, _LI, _LW
 from neutron.agent.linux import bridge_lib
 from neutron.agent.linux import ip_lib
 from neutron.api.rpc.handlers import securitygroups_rpc as sg_rpc
@ -85,16 +84,16 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase):
     def validate_interface_mappings(self):
         for physnet, interface in self.interface_mappings.items():
             if not ip_lib.device_exists(interface):
-                LOG.error(_LE("Interface %(intf)s for physical network %(net)s"
-                              " does not exist. Agent terminated!"),
+                LOG.error("Interface %(intf)s for physical network %(net)s"
+                          " does not exist. Agent terminated!",
                           {'intf': interface, 'net': physnet})
                 sys.exit(1)

     def validate_bridge_mappings(self):
         for physnet, bridge in self.bridge_mappings.items():
             if not ip_lib.device_exists(bridge):
-                LOG.error(_LE("Bridge %(brq)s for physical network %(net)s"
-                              " does not exist. Agent terminated!"),
+                LOG.error("Bridge %(brq)s for physical network %(net)s"
+                          " does not exist. Agent terminated!",
                           {'brq': bridge, 'net': physnet})
                 sys.exit(1)

@ -133,10 +132,10 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase):
             if not ip_addr.version == group_net.version:
                 raise ValueError()
         except (netaddr.core.AddrFormatError, ValueError):
-            LOG.error(_LE("Invalid VXLAN Group: %(group)s, must be an address "
-                          "or network (in CIDR notation) in a multicast "
-                          "range of the same address family as local_ip: "
-                          "%(ip)s"),
+            LOG.error("Invalid VXLAN Group: %(group)s, must be an address "
+                      "or network (in CIDR notation) in a multicast "
+                      "range of the same address family as local_ip: "
+                      "%(ip)s",
                       {'group': cfg.CONF.VXLAN.vxlan_group,
                        'ip': self.local_ip})
             sys.exit(1)
@ -145,10 +144,10 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase):
         """Return the device with local_ip on the host."""
         device = self.ip.get_device_by_ip(self.local_ip)
         if not device:
-            LOG.error(_LE("Tunneling cannot be enabled without the local_ip "
-                          "bound to an interface on the host. Please "
-                          "configure local_ip %s on the host interface to "
-                          "be used for tunneling and restart the agent."),
+            LOG.error("Tunneling cannot be enabled without the local_ip "
+                      "bound to an interface on the host. Please "
+                      "configure local_ip %s on the host interface to "
+                      "be used for tunneling and restart the agent.",
                       self.local_ip)
             sys.exit(1)
         return device
@ -161,16 +160,16 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase):
     @staticmethod
     def get_bridge_name(network_id):
         if not network_id:
-            LOG.warning(_LW("Invalid Network ID, will lead to incorrect "
-                            "bridge name"))
+            LOG.warning("Invalid Network ID, will lead to incorrect "
+                        "bridge name")
         bridge_name = BRIDGE_NAME_PREFIX + \
             network_id[:lconst.RESOURCE_ID_LENGTH]
         return bridge_name

     def get_subinterface_name(self, physical_interface, vlan_id):
         if not vlan_id:
-            LOG.warning(_LW("Invalid VLAN ID, will lead to incorrect "
-                            "subinterface name"))
+            LOG.warning("Invalid VLAN ID, will lead to incorrect "
+                        "subinterface name")
         vlan_postfix = '.%s' % vlan_id

         # For the vlan subinterface name prefix we use:
@ -206,8 +205,8 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase):
         if 0 <= int(segmentation_id) <= p_const.MAX_VXLAN_VNI:
             return VXLAN_INTERFACE_PREFIX + str(segmentation_id)
         else:
-            LOG.warning(_LW("Invalid Segmentation ID: %s, will lead to "
-                            "incorrect vxlan device name"), segmentation_id)
+            LOG.warning("Invalid Segmentation ID: %s, will lead to "
+                        "incorrect vxlan device name", segmentation_id)

     @staticmethod
     def _match_multicast_range(segmentation_id):
@ -252,8 +251,8 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase):
         """Create a vxlan and bridge unless they already exist."""
         interface = self.ensure_vxlan(segmentation_id)
         if not interface:
-            LOG.error(_LE("Failed creating vxlan interface for "
-                          "%(segmentation_id)s"),
+            LOG.error("Failed creating vxlan interface for "
+                      "%(segmentation_id)s",
                       {segmentation_id: segmentation_id})
             return
         bridge_name = self.get_bridge_name(network_id)
@ -304,9 +303,9 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase):
             with excutils.save_and_reraise_exception() as ctxt:
                 if ip_lib.vlan_in_use(vlan_id):
                     ctxt.reraise = False
-                    LOG.error(_LE("Unable to create VLAN interface for "
-                                  "VLAN ID %s because it is in use by "
-                                  "another interface."), vlan_id)
+                    LOG.error("Unable to create VLAN interface for "
+                              "VLAN ID %s because it is in use by "
+                              "another interface.", vlan_id)
                     return
         int_vlan.disable_ipv6()
         int_vlan.link.set_up()
@ -341,9 +340,9 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase):
                 # to avoid excessive lookups and a possible race condition.
                 if ip_lib.vxlan_in_use(segmentation_id):
                     ctxt.reraise = False
-                    LOG.error(_LE("Unable to create VXLAN interface for "
-                                  "VNI %s because it is in use by another "
-                                  "interface."), segmentation_id)
+                    LOG.error("Unable to create VXLAN interface for "
+                              "VNI %s because it is in use by another "
+                              "interface.", segmentation_id)
                     return None
         int_vxlan.disable_ipv6()
         int_vxlan.link.set_up()
@ -445,8 +444,8 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase):

             bridge_device.addif(interface)
         except Exception as e:
-            LOG.error(_LE("Unable to add %(interface)s to %(bridge_name)s"
-                          "! Exception: %(e)s"),
+            LOG.error("Unable to add %(interface)s to %(bridge_name)s"
+                      "! Exception: %(e)s",
                       {'interface': interface, 'bridge_name': bridge_name,
                        'e': e})
             return
@ -458,7 +457,7 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase):
                           segmentation_id):
         if network_type == p_const.TYPE_VXLAN:
             if self.vxlan_mode == lconst.VXLAN_NONE:
-                LOG.error(_LE("Unable to add vxlan interface for network %s"),
+                LOG.error("Unable to add vxlan interface for network %s",
                           network_id)
                 return
             return self.ensure_vxlan_bridge(network_id, segmentation_id)
@ -467,8 +466,8 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase):
         physical_bridge = self.get_existing_bridge_name(physical_network)
         physical_interface = self.interface_mappings.get(physical_network)
         if not physical_bridge and not physical_interface:
-            LOG.error(_LE("No bridge or interface mappings"
-                          " for physical network %s"),
+            LOG.error("No bridge or interface mappings"
+                      " for physical network %s",
                       physical_network)
             return
         if network_type == p_const.TYPE_FLAT:
@ -479,9 +478,9 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase):
                                            physical_interface,
                                            segmentation_id)
         else:
-            LOG.error(_LE("Unknown network_type %(network_type)s for network "
-                          "%(network_id)s."), {network_type: network_type,
+            LOG.error("Unknown network_type %(network_type)s for network "
+                      "%(network_id)s.", {network_type: network_type,
                                               network_id: network_id})

     def add_tap_interface(self, network_id, network_type, physical_network,
                           segmentation_id, tap_device_name, device_owner, mtu):
@ -664,8 +663,8 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase):
             return False
         if not ip_lib.iproute_arg_supported(
                 ['bridge', 'fdb'], 'append'):
-            LOG.warning(_LW('Option "%(option)s" must be supported by command '
-                            '"%(command)s" to enable %(mode)s mode'),
+            LOG.warning('Option "%(option)s" must be supported by command '
+                        '"%(command)s" to enable %(mode)s mode',
                         {'option': 'append',
                          'command': 'bridge fdb',
                          'mode': 'VXLAN UCAST'})
@ -679,7 +678,7 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase):
                 test_iface = self.ensure_vxlan(seg_id)
                 break
         else:
-            LOG.error(_LE('No valid Segmentation ID to perform UCAST test.'))
+            LOG.error('No valid Segmentation ID to perform UCAST test.')
             return False

         try:
@ -694,14 +693,14 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase):

     def vxlan_mcast_supported(self):
         if not cfg.CONF.VXLAN.vxlan_group:
-            LOG.warning(_LW('VXLAN muticast group(s) must be provided in '
-                            'vxlan_group option to enable VXLAN MCAST mode'))
+            LOG.warning('VXLAN muticast group(s) must be provided in '
+                        'vxlan_group option to enable VXLAN MCAST mode')
             return False
         if not ip_lib.iproute_arg_supported(
                 ['ip', 'link', 'add', 'type', 'vxlan'],
                 'proxy'):
-            LOG.warning(_LW('Option "%(option)s" must be supported by command '
-                            '"%(command)s" to enable %(mode)s mode'),
+            LOG.warning('Option "%(option)s" must be supported by command '
+                        '"%(command)s" to enable %(mode)s mode',
                         {'option': 'proxy',
                          'command': 'ip link add type vxlan',
                          'mode': 'VXLAN MCAST'})
@ -776,8 +775,8 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase):
             if mac:
                 break
         else:
-            LOG.error(_LE("Unable to obtain MAC address for unique ID. "
-                          "Agent terminated!"))
+            LOG.error("Unable to obtain MAC address for unique ID. "
+                      "Agent terminated!")
             sys.exit(1)
         return 'lb%s' % mac.replace(":", "")

@ -844,8 +843,8 @@ class LinuxBridgeRpcCallbacks(
         if network_id in self.network_map:
             phynet = self.network_map[network_id].physical_network
             if phynet and phynet in self.agent.mgr.bridge_mappings:
-                LOG.info(_LI("Physical network %s is defined in "
-                             "bridge_mappings and cannot be deleted."),
+                LOG.info("Physical network %s is defined in "
+                         "bridge_mappings and cannot be deleted.",
                          network_id)
                 return

@ -960,19 +959,19 @@ def main():
         interface_mappings = helpers.parse_mappings(
             cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)
     except ValueError as e:
-        LOG.error(_LE("Parsing physical_interface_mappings failed: %s. "
-                      "Agent terminated!"), e)
+        LOG.error("Parsing physical_interface_mappings failed: %s. "
+                  "Agent terminated!", e)
         sys.exit(1)
-    LOG.info(_LI("Interface mappings: %s"), interface_mappings)
+    LOG.info("Interface mappings: %s", interface_mappings)

     try:
         bridge_mappings = helpers.parse_mappings(
             cfg.CONF.LINUX_BRIDGE.bridge_mappings)
     except ValueError as e:
-        LOG.error(_LE("Parsing bridge_mappings failed: %s. "
-                      "Agent terminated!"), e)
+        LOG.error("Parsing bridge_mappings failed: %s. "
+                  "Agent terminated!", e)
         sys.exit(1)
-    LOG.info(_LI("Bridge mappings: %s"), bridge_mappings)
+    LOG.info("Bridge mappings: %s", bridge_mappings)

     manager = LinuxBridgeManager(bridge_mappings, interface_mappings)
     linuxbridge_capabilities.register()
@ -983,6 +982,6 @@ def main():
                                   constants.AGENT_TYPE_LINUXBRIDGE,
                                   LB_AGENT_BINARY)
     setup_profiler.setup("neutron-linuxbridge-agent", cfg.CONF.host)
-    LOG.info(_LI("Agent initialized successfully, now running... "))
+    LOG.info("Agent initialized successfully, now running... ")
     launcher = service.launch(cfg.CONF, agent)
     launcher.wait()
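A change this mechanical is easy to leave half-finished in files the viewer truncates. One way to sweep a tree for leftovers is a short script along these lines; the regex, file handling, and CLI are assumptions for illustration (it only catches a marker on the same line as the LOG call), not a project hacking check:

    import re
    import sys

    # Matches e.g. LOG.warning(_LW( or LOG.error(_LE( on a single line.
    MARKER = re.compile(r'\bLOG\.\w+\(\s*_L[IWEC]\(')


    def find_leftovers(paths):
        hits = []
        for path in paths:
            with open(path, encoding='utf-8') as f:
                for lineno, line in enumerate(f, 1):
                    if MARKER.search(line):
                        hits.append('%s:%d: %s' % (path, lineno, line.strip()))
        return hits


    if __name__ == '__main__':
        for hit in find_leftovers(sys.argv[1:]):
            print(hit)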
Some files were not shown because too many files have changed in this diff.