Merge "Vendor decomposition to move CSR1000v support to the networking-cisco repo"
This commit is contained in:
commit
41166d5333
@ -1,344 +0,0 @@
|
||||
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import eventlet
|
||||
eventlet.monkey_patch()
|
||||
import pprint
|
||||
import sys
|
||||
import time
|
||||
|
||||
from oslo_concurrency import lockutils
|
||||
from oslo_config import cfg
|
||||
import oslo_messaging
|
||||
from oslo_utils import importutils
|
||||
from oslo_utils import timeutils
|
||||
|
||||
from neutron.agent.common import config
|
||||
from neutron.agent.linux import external_process
|
||||
from neutron.agent.linux import interface
|
||||
from neutron.agent import rpc as agent_rpc
|
||||
from neutron.common import config as common_config
|
||||
from neutron.common import rpc as n_rpc
|
||||
from neutron.common import topics
|
||||
from neutron import context as n_context
|
||||
from neutron.i18n import _LE, _LI, _LW
|
||||
from neutron import manager
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.openstack.common import loopingcall
|
||||
from neutron.openstack.common import periodic_task
|
||||
from neutron.openstack.common import service
|
||||
from neutron.plugins.cisco.cfg_agent import device_status
|
||||
from neutron.plugins.cisco.common import cisco_constants as c_constants
|
||||
from neutron import service as neutron_service
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# Constants for agent registration.
|
||||
REGISTRATION_RETRY_DELAY = 2
|
||||
MAX_REGISTRATION_ATTEMPTS = 30
|
||||
|
||||
|
||||
class CiscoDeviceManagementApi(object):
|
||||
"""Agent side of the device manager RPC API."""
|
||||
|
||||
def __init__(self, topic, host):
|
||||
self.host = host
|
||||
target = oslo_messaging.Target(topic=topic, version='1.0')
|
||||
self.client = n_rpc.get_client(target)
|
||||
|
||||
def report_dead_hosting_devices(self, context, hd_ids=None):
|
||||
"""Report that a hosting device cannot be contacted (presumed dead).
|
||||
|
||||
:param: context: session context
|
||||
:param: hosting_device_ids: list of non-responding hosting devices
|
||||
:return: None
|
||||
"""
|
||||
cctxt = self.client.prepare()
|
||||
cctxt.cast(context, 'report_non_responding_hosting_devices',
|
||||
host=self.host, hosting_device_ids=hd_ids)
|
||||
|
||||
def register_for_duty(self, context):
|
||||
"""Report that a config agent is ready for duty."""
|
||||
cctxt = self.client.prepare()
|
||||
return cctxt.call(context, 'register_for_duty', host=self.host)
|
||||
|
||||
|
||||
class CiscoCfgAgent(manager.Manager):
|
||||
"""Cisco Cfg Agent.
|
||||
|
||||
This class defines a generic configuration agent for cisco devices which
|
||||
implement network services in the cloud backend. It is based on the
|
||||
(reference) l3-agent, but has been enhanced to support multiple services
|
||||
in addition to routing.
|
||||
|
||||
The agent acts like as a container for services and does not do any
|
||||
service specific processing or configuration itself.
|
||||
All service specific processing is delegated to service helpers which
|
||||
the agent loads. Thus routing specific updates are processed by the
|
||||
routing service helper, firewall by firewall helper etc.
|
||||
A further layer of abstraction is implemented by using device drivers for
|
||||
encapsulating all configuration operations of a service on a device.
|
||||
Device drivers are specific to a particular device/service VM eg: CSR1kv.
|
||||
|
||||
The main entry points in this class are the `process_services()` and
|
||||
`_backlog_task()` .
|
||||
"""
|
||||
target = oslo_messaging.Target(version='1.1')
|
||||
|
||||
OPTS = [
|
||||
cfg.IntOpt('rpc_loop_interval', default=10,
|
||||
help=_("Interval when the process_services() loop "
|
||||
"executes in seconds. This is when the config agent "
|
||||
"lets each service helper to process its neutron "
|
||||
"resources.")),
|
||||
cfg.StrOpt('routing_svc_helper_class',
|
||||
default='neutron.plugins.cisco.cfg_agent.service_helpers'
|
||||
'.routing_svc_helper.RoutingServiceHelper',
|
||||
help=_("Path of the routing service helper class.")),
|
||||
]
|
||||
|
||||
def __init__(self, host, conf=None):
|
||||
self.conf = conf or cfg.CONF
|
||||
self._dev_status = device_status.DeviceStatus()
|
||||
self.context = n_context.get_admin_context_without_session()
|
||||
|
||||
self._initialize_rpc(host)
|
||||
self._initialize_service_helpers(host)
|
||||
self._start_periodic_tasks()
|
||||
super(CiscoCfgAgent, self).__init__(host=self.conf.host)
|
||||
|
||||
def _initialize_rpc(self, host):
|
||||
self.devmgr_rpc = CiscoDeviceManagementApi(topics.L3PLUGIN, host)
|
||||
|
||||
def _initialize_service_helpers(self, host):
|
||||
svc_helper_class = self.conf.cfg_agent.routing_svc_helper_class
|
||||
try:
|
||||
self.routing_service_helper = importutils.import_object(
|
||||
svc_helper_class, host, self.conf, self)
|
||||
except ImportError as e:
|
||||
LOG.warning(_LW("Error in loading routing service helper. Class "
|
||||
"specified is %(class)s. Reason:%(reason)s"),
|
||||
{'class': self.conf.cfg_agent.routing_svc_helper_class,
|
||||
'reason': e})
|
||||
self.routing_service_helper = None
|
||||
|
||||
def _start_periodic_tasks(self):
|
||||
self.loop = loopingcall.FixedIntervalLoopingCall(self.process_services)
|
||||
self.loop.start(interval=self.conf.cfg_agent.rpc_loop_interval)
|
||||
|
||||
def after_start(self):
|
||||
LOG.info(_LI("Cisco cfg agent started"))
|
||||
|
||||
def get_routing_service_helper(self):
|
||||
return self.routing_service_helper
|
||||
|
||||
## Periodic tasks ##
|
||||
@periodic_task.periodic_task
|
||||
def _backlog_task(self, context):
|
||||
"""Process backlogged devices."""
|
||||
LOG.debug("Processing backlog.")
|
||||
self._process_backlogged_hosting_devices(context)
|
||||
|
||||
## Main orchestrator ##
|
||||
@lockutils.synchronized('cisco-cfg-agent', 'neutron-')
|
||||
def process_services(self, device_ids=None, removed_devices_info=None):
|
||||
"""Process services managed by this config agent.
|
||||
|
||||
This method is invoked by any of three scenarios.
|
||||
|
||||
1. Invoked by a periodic task running every `RPC_LOOP_INTERVAL`
|
||||
seconds. This is the most common scenario.
|
||||
In this mode, the method is called without any arguments.
|
||||
|
||||
2. Called by the `_process_backlogged_hosting_devices()` as part of
|
||||
the backlog processing task. In this mode, a list of device_ids
|
||||
are passed as arguments. These are the list of backlogged
|
||||
hosting devices that are now reachable and we want to sync services
|
||||
on them.
|
||||
|
||||
3. Called by the `hosting_devices_removed()` method. This is when
|
||||
the config agent has received a notification from the plugin that
|
||||
some hosting devices are going to be removed. The payload contains
|
||||
the details of the hosting devices and the associated neutron
|
||||
resources on them which should be processed and removed.
|
||||
|
||||
To avoid race conditions with these scenarios, this function is
|
||||
protected by a lock.
|
||||
|
||||
This method goes on to invoke `process_service()` on the
|
||||
different service helpers.
|
||||
|
||||
:param device_ids : List of devices that are now available and needs
|
||||
to be processed
|
||||
:param removed_devices_info: Info about the hosting devices which
|
||||
are going to be removed and details of the resources hosted on them.
|
||||
Expected Format:
|
||||
{
|
||||
'hosting_data': {'hd_id1': {'routers': [id1, id2, ...]},
|
||||
'hd_id2': {'routers': [id3, id4, ...]}, ...},
|
||||
'deconfigure': True/False
|
||||
}
|
||||
:return: None
|
||||
"""
|
||||
LOG.debug("Processing services started")
|
||||
# Now we process only routing service, additional services will be
|
||||
# added in future
|
||||
if self.routing_service_helper:
|
||||
self.routing_service_helper.process_service(device_ids,
|
||||
removed_devices_info)
|
||||
else:
|
||||
LOG.warning(_LW("No routing service helper loaded"))
|
||||
LOG.debug("Processing services completed")
|
||||
|
||||
def _process_backlogged_hosting_devices(self, context):
|
||||
"""Process currently backlogged devices.
|
||||
|
||||
Go through the currently backlogged devices and process them.
|
||||
For devices which are now reachable (compared to last time), we call
|
||||
`process_services()` passing the now reachable device's id.
|
||||
For devices which have passed the `hosting_device_dead_timeout` and
|
||||
hence presumed dead, execute a RPC to the plugin informing that.
|
||||
:param context: RPC context
|
||||
:return: None
|
||||
"""
|
||||
res = self._dev_status.check_backlogged_hosting_devices()
|
||||
if res['reachable']:
|
||||
self.process_services(device_ids=res['reachable'])
|
||||
if res['dead']:
|
||||
LOG.debug("Reporting dead hosting devices: %s", res['dead'])
|
||||
self.devmgr_rpc.report_dead_hosting_devices(context,
|
||||
hd_ids=res['dead'])
|
||||
|
||||
def hosting_devices_removed(self, context, payload):
|
||||
"""Deal with hosting device removed RPC message."""
|
||||
try:
|
||||
if payload['hosting_data']:
|
||||
if payload['hosting_data'].keys():
|
||||
self.process_services(removed_devices_info=payload)
|
||||
except KeyError as e:
|
||||
LOG.error(_LE("Invalid payload format for received RPC message "
|
||||
"`hosting_devices_removed`. Error is %(error)s. "
|
||||
"Payload is %(payload)s"),
|
||||
{'error': e, 'payload': payload})
|
||||
|
||||
|
||||
class CiscoCfgAgentWithStateReport(CiscoCfgAgent):
|
||||
|
||||
def __init__(self, host, conf=None):
|
||||
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
|
||||
self.agent_state = {
|
||||
'binary': 'neutron-cisco-cfg-agent',
|
||||
'host': host,
|
||||
'topic': c_constants.CFG_AGENT,
|
||||
'configurations': {},
|
||||
'start_flag': True,
|
||||
'agent_type': c_constants.AGENT_TYPE_CFG}
|
||||
report_interval = cfg.CONF.AGENT.report_interval
|
||||
self.use_call = True
|
||||
self._initialize_rpc(host)
|
||||
self._agent_registration()
|
||||
super(CiscoCfgAgentWithStateReport, self).__init__(host=host,
|
||||
conf=conf)
|
||||
if report_interval:
|
||||
self.heartbeat = loopingcall.FixedIntervalLoopingCall(
|
||||
self._report_state)
|
||||
self.heartbeat.start(interval=report_interval)
|
||||
|
||||
def _agent_registration(self):
|
||||
"""Register this agent with the server.
|
||||
|
||||
This method registers the cfg agent with the neutron server so hosting
|
||||
devices can be assigned to it. In case the server is not ready to
|
||||
accept registration (it sends a False) then we retry registration
|
||||
for `MAX_REGISTRATION_ATTEMPTS` with a delay of
|
||||
`REGISTRATION_RETRY_DELAY`. If there is no server response or a
|
||||
failure to register after the required number of attempts,
|
||||
the agent stops itself.
|
||||
"""
|
||||
for attempts in xrange(MAX_REGISTRATION_ATTEMPTS):
|
||||
context = n_context.get_admin_context_without_session()
|
||||
self.send_agent_report(self.agent_state, context)
|
||||
res = self.devmgr_rpc.register_for_duty(context)
|
||||
if res is True:
|
||||
LOG.info(_LI("[Agent registration] Agent successfully "
|
||||
"registered"))
|
||||
return
|
||||
elif res is False:
|
||||
LOG.warning(_LW("[Agent registration] Neutron server said "
|
||||
"that device manager was not ready. Retrying "
|
||||
"in %0.2f seconds "), REGISTRATION_RETRY_DELAY)
|
||||
time.sleep(REGISTRATION_RETRY_DELAY)
|
||||
elif res is None:
|
||||
LOG.error(_LE("[Agent registration] Neutron server said that "
|
||||
"no device manager was found. Cannot continue. "
|
||||
"Exiting!"))
|
||||
raise SystemExit("Cfg Agent exiting")
|
||||
LOG.error(_LE("[Agent registration] %d unsuccessful registration "
|
||||
"attempts. Exiting!"), MAX_REGISTRATION_ATTEMPTS)
|
||||
raise SystemExit("Cfg Agent exiting")
|
||||
|
||||
def _report_state(self):
|
||||
"""Report state to the plugin.
|
||||
|
||||
This task run every `report_interval` period.
|
||||
Collects, creates and sends a summary of the services currently
|
||||
managed by this agent. Data is collected from the service helper(s).
|
||||
Refer the `configurations` dict for the parameters reported.
|
||||
:return: None
|
||||
"""
|
||||
LOG.debug("Report state task started")
|
||||
configurations = {}
|
||||
if self.routing_service_helper:
|
||||
configurations = self.routing_service_helper.collect_state(
|
||||
self.agent_state['configurations'])
|
||||
non_responding = self._dev_status.get_backlogged_hosting_devices_info()
|
||||
configurations['non_responding_hosting_devices'] = non_responding
|
||||
self.agent_state['configurations'] = configurations
|
||||
self.agent_state['local_time'] = str(timeutils.utcnow())
|
||||
LOG.debug("State report data: %s", pprint.pformat(self.agent_state))
|
||||
self.send_agent_report(self.agent_state, self.context)
|
||||
|
||||
def send_agent_report(self, report, context):
|
||||
"""Send the agent report via RPC."""
|
||||
try:
|
||||
self.state_rpc.report_state(context, report, self.use_call)
|
||||
report.pop('start_flag', None)
|
||||
self.use_call = False
|
||||
LOG.debug("Send agent report successfully completed")
|
||||
except AttributeError:
|
||||
# This means the server does not support report_state
|
||||
LOG.warning(_LW("Neutron server does not support state report. "
|
||||
"State report for this agent will be disabled."))
|
||||
self.heartbeat.stop()
|
||||
return
|
||||
except Exception:
|
||||
LOG.exception(_LE("Failed sending agent report!"))
|
||||
|
||||
|
||||
def main(manager='neutron.plugins.cisco.cfg_agent.'
|
||||
'cfg_agent.CiscoCfgAgentWithStateReport'):
|
||||
conf = cfg.CONF
|
||||
conf.register_opts(CiscoCfgAgent.OPTS, "cfg_agent")
|
||||
config.register_agent_state_opts_helper(conf)
|
||||
conf.register_opts(interface.OPTS)
|
||||
conf.register_opts(external_process.OPTS)
|
||||
common_config.init(sys.argv[1:])
|
||||
conf(project='neutron')
|
||||
config.setup_logging()
|
||||
server = neutron_service.Service.create(
|
||||
binary='neutron-cisco-cfg-agent',
|
||||
topic=c_constants.CFG_AGENT,
|
||||
report_interval=cfg.CONF.AGENT.report_interval,
|
||||
manager=manager)
|
||||
service.launch(server).wait()
|
@ -1,58 +0,0 @@
|
||||
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Exceptions by Cisco Configuration Agent."""
|
||||
|
||||
from neutron.common import exceptions
|
||||
|
||||
|
||||
class DriverException(exceptions.NeutronException):
|
||||
"""Exception created by the Driver class."""
|
||||
|
||||
|
||||
class CSR1kvInitializationException(DriverException):
|
||||
"""Exception when initialization of CSR1kv Routing Driver object."""
|
||||
message = (_("Critical device parameter missing. Failed initializing "
|
||||
"CSR1kv routing driver."))
|
||||
|
||||
|
||||
class CSR1kvConnectionException(DriverException):
|
||||
"""Connection exception when connecting to CSR1kv hosting device."""
|
||||
message = (_("Failed connecting to CSR1kv. Reason: %(reason)s. "
|
||||
"Connection params are User:%(user)s, Host:%(host)s, "
|
||||
"Port:%(port)s, Device timeout:%(timeout)s."))
|
||||
|
||||
|
||||
class CSR1kvConfigException(DriverException):
|
||||
"""Configuration exception thrown when modifying the running config."""
|
||||
message = (_("Error executing snippet:%(snippet)s. "
|
||||
"ErrorType:%(type)s ErrorTag:%(tag)s."))
|
||||
|
||||
|
||||
class CSR1kvUnknownValueException(DriverException):
|
||||
"""CSR1kv Exception thrown when an unknown value is received."""
|
||||
message = (_("Data in attribute: %(attribute)s does not correspond to "
|
||||
"expected value. Value received is %(value)s. "))
|
||||
|
||||
|
||||
class DriverNotExist(DriverException):
|
||||
message = _("Driver %(driver)s does not exist.")
|
||||
|
||||
|
||||
class DriverNotFound(DriverException):
|
||||
message = _("Driver not found for resource id:%(id)s.")
|
||||
|
||||
|
||||
class DriverNotSetForMissingParameter(DriverException):
|
||||
message = _("Driver cannot be set for missing parameter:%(p)s.")
|
@ -1,353 +0,0 @@
|
||||
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
CSR (IOS-XE) XML-based configuration snippets
|
||||
"""
|
||||
|
||||
# The standard Template used to interact with IOS-XE(CSR).
|
||||
# This template is added by the netconf client
|
||||
# EXEC_CONF_SNIPPET = """
|
||||
# <config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
|
||||
# <configure>
|
||||
# <__XML__MODE__exec_configure>%s
|
||||
# </__XML__MODE__exec_configure>
|
||||
# </configure>
|
||||
# </config>
|
||||
# """
|
||||
|
||||
|
||||
#=================================================#
|
||||
# Set ip address on an interface
|
||||
# $(config)interface GigabitEthernet 1
|
||||
# $(config)ip address 10.0.100.1 255.255.255.0
|
||||
#=================================================#
|
||||
SET_INTC = """
|
||||
<config>
|
||||
<cli-config-data>
|
||||
<cmd>interface %s</cmd>
|
||||
<cmd>ip address %s %s</cmd>
|
||||
</cli-config-data>
|
||||
</config>
|
||||
"""
|
||||
|
||||
#=================================================#
|
||||
# Enable an interface
|
||||
# $(config)interface GigabitEthernet 1
|
||||
# $(config)no shutdown
|
||||
#=================================================#
|
||||
ENABLE_INTF = """
|
||||
<config>
|
||||
<cli-config-data>
|
||||
<cmd>interface %s</cmd>
|
||||
<cmd>no shutdown</cmd>
|
||||
</cli-config-data>
|
||||
</config>
|
||||
"""
|
||||
|
||||
#=================================================#
|
||||
# Create VRF
|
||||
# $(config)vrf definition nrouter-e7d4y5
|
||||
# $(config-vrf)address-family ipv4
|
||||
# $(config-vrf-af)exit-address-family
|
||||
# $(config-vrf)address-family ipv6
|
||||
# $(config-vrf-af)exit-address-family
|
||||
#=================================================#
|
||||
CREATE_VRF = """
|
||||
<config>
|
||||
<cli-config-data>
|
||||
<cmd>vrf definition %s</cmd>
|
||||
<cmd>address-family ipv4</cmd>
|
||||
<cmd>exit-address-family</cmd>
|
||||
<cmd>address-family ipv6</cmd>
|
||||
<cmd>exit-address-family</cmd>
|
||||
</cli-config-data>
|
||||
</config>
|
||||
"""
|
||||
|
||||
#=================================================#
|
||||
# Remove VRF
|
||||
# $(config)no vrf definition nrouter-e7d4y5
|
||||
#=================================================#
|
||||
REMOVE_VRF = """
|
||||
<config>
|
||||
<cli-config-data>
|
||||
<cmd>no vrf definition %s</cmd>
|
||||
</cli-config-data>
|
||||
</config>
|
||||
"""
|
||||
|
||||
#=================================================#
|
||||
# Create Subinterface
|
||||
# $(config)interface GigabitEthernet 2.500
|
||||
# $(config)encapsulation dot1Q 500
|
||||
# $(config)vrf forwarding nrouter-e7d4y5
|
||||
# $(config)ip address 192.168.0.1 255.255.255.0
|
||||
#=================================================#
|
||||
CREATE_SUBINTERFACE = """
|
||||
<config>
|
||||
<cli-config-data>
|
||||
<cmd>interface %s</cmd>
|
||||
<cmd>encapsulation dot1Q %s</cmd>
|
||||
<cmd>vrf forwarding %s</cmd>
|
||||
<cmd>ip address %s %s</cmd>
|
||||
</cli-config-data>
|
||||
</config>
|
||||
|
||||
"""
|
||||
|
||||
#=================================================#
|
||||
# Remove Subinterface
|
||||
# $(config)no interface GigabitEthernet 2.500
|
||||
#=================================================#
|
||||
REMOVE_SUBINTERFACE = """
|
||||
<config>
|
||||
<cli-config-data>
|
||||
<cmd>no interface %s</cmd>
|
||||
</cli-config-data>
|
||||
</config>
|
||||
"""
|
||||
|
||||
#=================================================#
|
||||
# Enable HSRP on a Subinterface
|
||||
# $(config)interface GigabitEthernet 2.500
|
||||
# $(config)vrf forwarding nrouter-e7d4y5
|
||||
# $(config)standby version 2
|
||||
# $(config)standby <group> priority <priority>
|
||||
# $(config)standby <group> ip <ip>
|
||||
#=================================================#
|
||||
SET_INTC_HSRP = """
|
||||
<config>
|
||||
<cli-config-data>
|
||||
<cmd>interface %s</cmd>
|
||||
<cmd>vrf forwarding %s</cmd>
|
||||
<cmd>standby version 2</cmd>
|
||||
<cmd>standby %s priority %s</cmd>
|
||||
<cmd>standby %s ip %s</cmd>
|
||||
</cli-config-data>
|
||||
</config>
|
||||
|
||||
"""
|
||||
|
||||
#=================================================#
|
||||
# Remove HSRP on a Subinterface
|
||||
# $(config)interface GigabitEthernet 2.500
|
||||
# $(config)no standby version 2
|
||||
# $(config)no standby <group>
|
||||
#=================================================#
|
||||
REMOVE_INTC_HSRP = """
|
||||
<config>
|
||||
<cli-config-data>
|
||||
<cmd>interface %s</cmd>
|
||||
<cmd>no standby %s</cmd>
|
||||
<cmd>no standby version 2</cmd>
|
||||
</cli-config-data>
|
||||
</config>
|
||||
|
||||
"""
|
||||
|
||||
|
||||
#=================================================#
|
||||
# Create Access Control List
|
||||
# $(config)ip access-list standard acl_500
|
||||
# $(config)permit 192.168.0.1 255.255.255.0
|
||||
#=================================================#
|
||||
CREATE_ACL = """
|
||||
<config>
|
||||
<cli-config-data>
|
||||
<cmd>ip access-list standard %s</cmd>
|
||||
<cmd>permit %s %s</cmd>
|
||||
</cli-config-data>
|
||||
</config>
|
||||
"""
|
||||
|
||||
#=================================================#
|
||||
# Remove Access Control List
|
||||
# $(config)no ip access-list standard acl_500
|
||||
#=================================================#
|
||||
REMOVE_ACL = """
|
||||
<config>
|
||||
<cli-config-data>
|
||||
<cmd>no ip access-list standard %s</cmd>
|
||||
</cli-config-data>
|
||||
</config>
|
||||
"""
|
||||
|
||||
#=========================================================================#
|
||||
# Set Dynamic source translation on an interface
|
||||
# Syntax: ip nat inside source list <acl_no> interface <interface>
|
||||
# .......vrf <vrf_name> overload
|
||||
# eg: $(config)ip nat inside source list acl_500
|
||||
# ..........interface GigabitEthernet3.100 vrf nrouter-e7d4y5 overload
|
||||
#========================================================================#
|
||||
SNAT_CFG = "ip nat inside source list %s interface %s vrf %s overload"
|
||||
|
||||
SET_DYN_SRC_TRL_INTFC = """
|
||||
<config>
|
||||
<cli-config-data>
|
||||
<cmd>ip nat inside source list %s interface %s vrf %s
|
||||
overload</cmd>
|
||||
</cli-config-data>
|
||||
</config>
|
||||
|
||||
"""
|
||||
|
||||
#=========================================================================#
|
||||
# Remove Dynamic source translation on an interface
|
||||
# Syntax: no ip nat inside source list <acl_no> interface <interface>
|
||||
# .......vrf <vrf_name> overload
|
||||
# eg: $(config)no ip nat inside source list acl_500
|
||||
# ..........interface GigabitEthernet3.100 vrf nrouter-e7d4y5 overload
|
||||
#========================================================================#
|
||||
REMOVE_DYN_SRC_TRL_INTFC = """
|
||||
<config>
|
||||
<cli-config-data>
|
||||
<cmd>no ip nat inside source list %s interface %s vrf %s
|
||||
overload</cmd>
|
||||
</cli-config-data>
|
||||
</config>
|
||||
|
||||
"""
|
||||
|
||||
#=================================================#
|
||||
# Set NAT
|
||||
# Syntax : interface <interface>
|
||||
# ip nat <inside|outside>
|
||||
#=================================================#
|
||||
SET_NAT = """
|
||||
<config>
|
||||
<cli-config-data>
|
||||
<cmd>interface %s</cmd>
|
||||
<cmd>ip nat %s</cmd>
|
||||
</cli-config-data>
|
||||
</config>
|
||||
"""
|
||||
|
||||
#=================================================#
|
||||
# Remove NAT
|
||||
# Syntax : interface <interface>
|
||||
# no ip nat <inside|outside>
|
||||
#=================================================#
|
||||
REMOVE_NAT = """
|
||||
<config>
|
||||
<cli-config-data>
|
||||
<cmd>interface %s</cmd>
|
||||
<cmd>no ip nat %s</cmd>
|
||||
</cli-config-data>
|
||||
</config>
|
||||
"""
|
||||
|
||||
#=========================================================================#
|
||||
# Set Static source translation on an interface
|
||||
# Syntax: ip nat inside source static <fixed_ip> <floating_ip>
|
||||
# .......vrf <vrf_name> match-in-vrf
|
||||
# eg: $(config)ip nat inside source static 192.168.0.1 121.158.0.5
|
||||
# ..........vrf nrouter-e7d4y5 match-in-vrf
|
||||
#========================================================================#
|
||||
SET_STATIC_SRC_TRL = """
|
||||
<config>
|
||||
<cli-config-data>
|
||||
<cmd>ip nat inside source static %s %s vrf %s match-in-vrf</cmd>
|
||||
</cli-config-data>
|
||||
</config>
|
||||
|
||||
"""
|
||||
|
||||
#=========================================================================#
|
||||
# Remove Static source translation on an interface
|
||||
# Syntax: no ip nat inside source static <fixed_ip> <floating_ip>
|
||||
# .......vrf <vrf_name> match-in-vrf
|
||||
# eg: $(config)no ip nat inside source static 192.168.0.1 121.158.0.5
|
||||
# ..........vrf nrouter-e7d4y5 match-in-vrf
|
||||
#========================================================================#
|
||||
REMOVE_STATIC_SRC_TRL = """
|
||||
<config>
|
||||
<cli-config-data>
|
||||
<cmd>no ip nat inside source static %s %s vrf %s match-in-vrf</cmd>
|
||||
</cli-config-data>
|
||||
</config>
|
||||
|
||||
"""
|
||||
|
||||
#=============================================================================#
|
||||
# Set ip route
|
||||
# Syntax: ip route vrf <vrf-name> <destination> <mask> [<interface>] <next hop>
|
||||
# eg: $(config)ip route vrf nrouter-e7d4y5 8.8.0.0 255.255.0.0 10.0.100.255
|
||||
#=============================================================================#
|
||||
SET_IP_ROUTE = """
|
||||
<config>
|
||||
<cli-config-data>
|
||||
<cmd>ip route vrf %s %s %s %s</cmd>
|
||||
</cli-config-data>
|
||||
</config>
|
||||
"""
|
||||
|
||||
#=============================================================================#
|
||||
# Remove ip route
|
||||
# Syntax: no ip route vrf <vrf-name> <destination> <mask>
|
||||
# [<interface>] <next hop>
|
||||
# eg: $(config)no ip route vrf nrouter-e7d4y5 8.8.0.0 255.255.0.0 10.0.100.255
|
||||
#=============================================================================#
|
||||
REMOVE_IP_ROUTE = """
|
||||
<config>
|
||||
<cli-config-data>
|
||||
<cmd>no ip route vrf %s %s %s %s</cmd>
|
||||
</cli-config-data>
|
||||
</config>
|
||||
"""
|
||||
#=============================================================================#
|
||||
# Set default ip route
|
||||
# Syntax: ip route vrf <vrf-name> 0.0.0.0 0.0.0.0 [<interface>] <next hop>
|
||||
# eg: $(config)ip route vrf nrouter-e7d4y5 0.0.0.0 0.0.0.0 10.0.100.255
|
||||
#=============================================================================#
|
||||
DEFAULT_ROUTE_CFG = 'ip route vrf %s 0.0.0.0 0.0.0.0 %s'
|
||||
|
||||
SET_DEFAULT_ROUTE = """
|
||||
<config>
|
||||
<cli-config-data>
|
||||
<cmd>ip route vrf %s 0.0.0.0 0.0.0.0 %s</cmd>
|
||||
</cli-config-data>
|
||||
</config>
|
||||
"""
|
||||
|
||||
#=============================================================================#
|
||||
# Remove default ip route
|
||||
# Syntax: ip route vrf <vrf-name> 0.0.0.0 0.0.0.0 [<interface>] <next hop>
|
||||
# eg: $(config)ip route vrf nrouter-e7d4y5 0.0.0.0 0.0.0.0 10.0.100.255
|
||||
#=============================================================================#
|
||||
REMOVE_DEFAULT_ROUTE = """
|
||||
<config>
|
||||
<cli-config-data>
|
||||
<cmd>no ip route vrf %s 0.0.0.0 0.0.0.0 %s</cmd>
|
||||
</cli-config-data>
|
||||
</config>
|
||||
"""
|
||||
|
||||
#=============================================================================#
|
||||
# Clear dynamic nat translations. This is used to clear any nat bindings before
|
||||
# we can turn off NAT on an interface
|
||||
# Syntax: clear ip nat translation [forced]
|
||||
#=============================================================================#
|
||||
# CLEAR_DYN_NAT_TRANS = """
|
||||
# <oper-data-format-text-block>
|
||||
# <exec>clear ip nat translation forced</exec>
|
||||
# </oper-data-format-text-block>
|
||||
# """
|
||||
CLEAR_DYN_NAT_TRANS = """
|
||||
<config>
|
||||
<cli-config-data>
|
||||
<cmd>do clear ip nat translation forced</cmd>
|
||||
</cli-config-data>
|
||||
</config>
|
||||
"""
|
@ -1,686 +0,0 @@
|
||||
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import logging
|
||||
import netaddr
|
||||
import re
|
||||
import time
|
||||
import xml.etree.ElementTree as ET
|
||||
|
||||
import ciscoconfparse
|
||||
from ncclient import manager
|
||||
|
||||
from oslo_config import cfg
|
||||
|
||||
from neutron.i18n import _LE, _LI, _LW
|
||||
from neutron.plugins.cisco.cfg_agent import cfg_exceptions as cfg_exc
|
||||
from neutron.plugins.cisco.cfg_agent.device_drivers.csr1kv import (
|
||||
cisco_csr1kv_snippets as snippets)
|
||||
from neutron.plugins.cisco.cfg_agent.device_drivers import devicedriver_api
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# N1kv constants
|
||||
T1_PORT_NAME_PREFIX = 't1_p:' # T1 port/network is for VXLAN
|
||||
T2_PORT_NAME_PREFIX = 't2_p:' # T2 port/network is for VLAN
|
||||
|
||||
|
||||
class CSR1kvRoutingDriver(devicedriver_api.RoutingDriverBase):
|
||||
"""CSR1kv Routing Driver.
|
||||
|
||||
This driver encapsulates the configuration logic via NETCONF protocol to
|
||||
configure a CSR1kv Virtual Router (IOS-XE based) for implementing
|
||||
Neutron L3 services. These services include routing, NAT and floating
|
||||
IPs (as per Neutron terminology).
|
||||
"""
|
||||
|
||||
DEV_NAME_LEN = 14
|
||||
|
||||
def __init__(self, **device_params):
|
||||
try:
|
||||
self._csr_host = device_params['management_ip_address']
|
||||
self._csr_ssh_port = device_params['protocol_port']
|
||||
credentials = device_params['credentials']
|
||||
if credentials:
|
||||
self._csr_user = credentials['username']
|
||||
self._csr_password = credentials['password']
|
||||
self._timeout = cfg.CONF.cfg_agent.device_connection_timeout
|
||||
self._csr_conn = None
|
||||
self._intfs_enabled = False
|
||||
except KeyError as e:
|
||||
LOG.error(_LE("Missing device parameter:%s. Aborting "
|
||||
"CSR1kvRoutingDriver initialization"), e)
|
||||
raise cfg_exc.CSR1kvInitializationException()
|
||||
|
||||
###### Public Functions ########
|
||||
def router_added(self, ri):
|
||||
self._csr_create_vrf(ri)
|
||||
|
||||
def router_removed(self, ri):
|
||||
self._csr_remove_vrf(ri)
|
||||
|
||||
def internal_network_added(self, ri, port):
|
||||
self._csr_create_subinterface(ri, port)
|
||||
if port.get('ha_info') is not None and ri.ha_info['ha:enabled']:
|
||||
self._csr_add_ha(ri, port)
|
||||
|
||||
def internal_network_removed(self, ri, port):
|
||||
self._csr_remove_subinterface(port)
|
||||
|
||||
def external_gateway_added(self, ri, ex_gw_port):
|
||||
self._csr_create_subinterface(ri, ex_gw_port)
|
||||
ex_gw_ip = ex_gw_port['subnet']['gateway_ip']
|
||||
if ex_gw_ip:
|
||||
#Set default route via this network's gateway ip
|
||||
self._csr_add_default_route(ri, ex_gw_ip)
|
||||
|
||||
def external_gateway_removed(self, ri, ex_gw_port):
|
||||
ex_gw_ip = ex_gw_port['subnet']['gateway_ip']
|
||||
if ex_gw_ip:
|
||||
#Remove default route via this network's gateway ip
|
||||
self._csr_remove_default_route(ri, ex_gw_ip)
|
||||
#Finally, remove external network subinterface
|
||||
self._csr_remove_subinterface(ex_gw_port)
|
||||
|
||||
def enable_internal_network_NAT(self, ri, port, ex_gw_port):
|
||||
self._csr_add_internalnw_nat_rules(ri, port, ex_gw_port)
|
||||
|
||||
def disable_internal_network_NAT(self, ri, port, ex_gw_port):
|
||||
self._csr_remove_internalnw_nat_rules(ri, [port], ex_gw_port)
|
||||
|
||||
def floating_ip_added(self, ri, ex_gw_port, floating_ip, fixed_ip):
|
||||
self._csr_add_floating_ip(ri, floating_ip, fixed_ip)
|
||||
|
||||
def floating_ip_removed(self, ri, ex_gw_port, floating_ip, fixed_ip):
|
||||
self._csr_remove_floating_ip(ri, ex_gw_port, floating_ip, fixed_ip)
|
||||
|
||||
def routes_updated(self, ri, action, route):
|
||||
self._csr_update_routing_table(ri, action, route)
|
||||
|
||||
def clear_connection(self):
|
||||
self._csr_conn = None
|
||||
|
||||
##### Internal Functions ####
|
||||
|
||||
def _csr_create_subinterface(self, ri, port):
|
||||
vrf_name = self._csr_get_vrf_name(ri)
|
||||
ip_cidr = port['ip_cidr']
|
||||
netmask = netaddr.IPNetwork(ip_cidr).netmask
|
||||
gateway_ip = ip_cidr.split('/')[0]
|
||||
subinterface = self._get_interface_name_from_hosting_port(port)
|
||||
vlan = self._get_interface_vlan_from_hosting_port(port)
|
||||
self._create_subinterface(subinterface, vlan, vrf_name,
|
||||
gateway_ip, netmask)
|
||||
|
||||
def _csr_remove_subinterface(self, port):
|
||||
subinterface = self._get_interface_name_from_hosting_port(port)
|
||||
self._remove_subinterface(subinterface)
|
||||
|
||||
def _csr_add_ha(self, ri, port):
|
||||
func_dict = {
|
||||
'HSRP': CSR1kvRoutingDriver._csr_add_ha_HSRP,
|
||||
'VRRP': CSR1kvRoutingDriver._csr_add_ha_VRRP,
|
||||
'GBLP': CSR1kvRoutingDriver._csr_add_ha_GBLP
|
||||
}
|
||||
#Invoke the right function for the ha type
|
||||
func_dict[ri.ha_info['ha:type']](self, ri, port)
|
||||
|
||||
def _csr_add_ha_HSRP(self, ri, port):
|
||||
priority = ri.ha_info['priority']
|
||||
port_ha_info = port['ha_info']
|
||||
group = port_ha_info['group']
|
||||
ip = port_ha_info['virtual_port']['fixed_ips'][0]['ip_address']
|
||||
if ip and group and priority:
|
||||
vrf_name = self._csr_get_vrf_name(ri)
|
||||
subinterface = self._get_interface_name_from_hosting_port(port)
|
||||
self._set_ha_HSRP(subinterface, vrf_name, priority, group, ip)
|
||||
|
||||
def _csr_add_ha_VRRP(self, ri, port):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _csr_add_ha_GBLP(self, ri, port):
|
||||
raise NotImplementedError()
|
||||
|
||||
def _csr_remove_ha(self, ri, port):
|
||||
pass
|
||||
|
||||
def _csr_add_internalnw_nat_rules(self, ri, port, ex_port):
|
||||
vrf_name = self._csr_get_vrf_name(ri)
|
||||
in_vlan = self._get_interface_vlan_from_hosting_port(port)
|
||||
acl_no = 'acl_' + str(in_vlan)
|
||||
internal_cidr = port['ip_cidr']
|
||||
internal_net = netaddr.IPNetwork(internal_cidr).network
|
||||
netmask = netaddr.IPNetwork(internal_cidr).hostmask
|
||||
inner_intfc = self._get_interface_name_from_hosting_port(port)
|
||||
outer_intfc = self._get_interface_name_from_hosting_port(ex_port)
|
||||
self._nat_rules_for_internet_access(acl_no, internal_net,
|
||||
netmask, inner_intfc,
|
||||
outer_intfc, vrf_name)
|
||||
|
||||
def _csr_remove_internalnw_nat_rules(self, ri, ports, ex_port):
|
||||
acls = []
|
||||
#First disable nat in all inner ports
|
||||
for port in ports:
|
||||
in_intfc_name = self._get_interface_name_from_hosting_port(port)
|
||||
inner_vlan = self._get_interface_vlan_from_hosting_port(port)
|
||||
acls.append("acl_" + str(inner_vlan))
|
||||
self._remove_interface_nat(in_intfc_name, 'inside')
|
||||
|
||||
#Wait for two second
|
||||
LOG.debug("Sleep for 2 seconds before clearing NAT rules")
|
||||
time.sleep(2)
|
||||
|
||||
#Clear the NAT translation table
|
||||
self._remove_dyn_nat_translations()
|
||||
|
||||
# Remove dynamic NAT rules and ACLs
|
||||
vrf_name = self._csr_get_vrf_name(ri)
|
||||
ext_intfc_name = self._get_interface_name_from_hosting_port(ex_port)
|
||||
for acl in acls:
|
||||
self._remove_dyn_nat_rule(acl, ext_intfc_name, vrf_name)
|
||||
|
||||
def _csr_add_default_route(self, ri, gw_ip):
|
||||
vrf_name = self._csr_get_vrf_name(ri)
|
||||
self._add_default_static_route(gw_ip, vrf_name)
|
||||
|
||||
def _csr_remove_default_route(self, ri, gw_ip):
|
||||
vrf_name = self._csr_get_vrf_name(ri)
|
||||
self._remove_default_static_route(gw_ip, vrf_name)
|
||||
|
||||
def _csr_add_floating_ip(self, ri, floating_ip, fixed_ip):
|
||||
vrf_name = self._csr_get_vrf_name(ri)
|
||||
self._add_floating_ip(floating_ip, fixed_ip, vrf_name)
|
||||
|
||||
def _csr_remove_floating_ip(self, ri, ex_gw_port, floating_ip, fixed_ip):
|
||||
vrf_name = self._csr_get_vrf_name(ri)
|
||||
out_intfc_name = self._get_interface_name_from_hosting_port(ex_gw_port)
|
||||
# First remove NAT from outer interface
|
||||
self._remove_interface_nat(out_intfc_name, 'outside')
|
||||
#Clear the NAT translation table
|
||||
self._remove_dyn_nat_translations()
|
||||
#Remove the floating ip
|
||||
self._remove_floating_ip(floating_ip, fixed_ip, vrf_name)
|
||||
#Enable NAT on outer interface
|
||||
self._add_interface_nat(out_intfc_name, 'outside')
|
||||
|
||||
def _csr_update_routing_table(self, ri, action, route):
|
||||
vrf_name = self._csr_get_vrf_name(ri)
|
||||
destination_net = netaddr.IPNetwork(route['destination'])
|
||||
dest = destination_net.network
|
||||
dest_mask = destination_net.netmask
|
||||
next_hop = route['nexthop']
|
||||
if action is 'replace':
|
||||
self._add_static_route(dest, dest_mask, next_hop, vrf_name)
|
||||
elif action is 'delete':
|
||||
self._remove_static_route(dest, dest_mask, next_hop, vrf_name)
|
||||
else:
|
||||
LOG.error(_LE('Unknown route command %s'), action)
|
||||
|
||||
def _csr_create_vrf(self, ri):
|
||||
vrf_name = self._csr_get_vrf_name(ri)
|
||||
self._create_vrf(vrf_name)
|
||||
|
||||
def _csr_remove_vrf(self, ri):
|
||||
vrf_name = self._csr_get_vrf_name(ri)
|
||||
self._remove_vrf(vrf_name)
|
||||
|
||||
def _csr_get_vrf_name(self, ri):
|
||||
return ri.router_name()[:self.DEV_NAME_LEN]
|
||||
|
||||
def _get_connection(self):
|
||||
"""Make SSH connection to the CSR.
|
||||
|
||||
The external ncclient library is used for creating this connection.
|
||||
This method keeps state of any existing connections and reuses them if
|
||||
already connected. Also CSR1kv's interfaces (except management) are
|
||||
disabled by default when it is booted. So if connecting for the first
|
||||
time, driver will enable all other interfaces and keep that status in
|
||||
the `_intfs_enabled` flag.
|
||||
"""
|
||||
try:
|
||||
if self._csr_conn and self._csr_conn.connected:
|
||||
return self._csr_conn
|
||||
else:
|
||||
self._csr_conn = manager.connect(host=self._csr_host,
|
||||
port=self._csr_ssh_port,
|
||||
username=self._csr_user,
|
||||
password=self._csr_password,
|
||||
device_params={'name': "csr"},
|
||||
timeout=self._timeout)
|
||||
if not self._intfs_enabled:
|
||||
self._intfs_enabled = self._enable_intfs(self._csr_conn)
|
||||
return self._csr_conn
|
||||
except Exception as e:
|
||||
conn_params = {'host': self._csr_host, 'port': self._csr_ssh_port,
|
||||
'user': self._csr_user,
|
||||
'timeout': self._timeout, 'reason': e.message}
|
||||
raise cfg_exc.CSR1kvConnectionException(**conn_params)
|
||||
|
||||
def _get_interface_name_from_hosting_port(self, port):
|
||||
vlan = self._get_interface_vlan_from_hosting_port(port)
|
||||
int_no = self._get_interface_no_from_hosting_port(port)
|
||||
intfc_name = 'GigabitEthernet%s.%s' % (int_no, vlan)
|
||||
return intfc_name
|
||||
|
||||
@staticmethod
|
||||
def _get_interface_vlan_from_hosting_port(port):
|
||||
return port['hosting_info']['segmentation_id']
|
||||
|
||||
@staticmethod
|
||||
def _get_interface_no_from_hosting_port(port):
|
||||
"""Calculate interface number from the hosting port's name.
|
||||
|
||||
Interfaces in the CSR1kv are created in pairs (T1 and T2) where
|
||||
T1 interface is used for VLAN and T2 interface for VXLAN traffic
|
||||
respectively. On the neutron side these are named T1 and T2 ports and
|
||||
follows the naming convention: <Tx_PORT_NAME_PREFIX>:<PAIR_INDEX>
|
||||
where the `PORT_NAME_PREFIX` indicates either VLAN or VXLAN and
|
||||
`PAIR_INDEX` is the pair number. `PAIR_INDEX` starts at 1.
|
||||
|
||||
In CSR1kv, GigabitEthernet 0 is not present and GigabitEthernet 1
|
||||
is used as a management interface (Note: this might change in
|
||||
future). So the first (T1,T2) pair corresponds to
|
||||
(GigabitEthernet 2, GigabitEthernet 3) and so forth. This function
|
||||
extracts the `PAIR_INDEX` and calculates the corresponding interface
|
||||
number.
|
||||
|
||||
:param port: neutron port corresponding to the interface.
|
||||
:return: number of the interface (eg: 1 in case of GigabitEthernet1)
|
||||
"""
|
||||
_name = port['hosting_info']['hosting_port_name']
|
||||
if_type = _name.split(':')[0] + ':'
|
||||
if if_type == T1_PORT_NAME_PREFIX:
|
||||
return str(int(_name.split(':')[1]) * 2)
|
||||
elif if_type == T2_PORT_NAME_PREFIX:
|
||||
return str(int(_name.split(':')[1]) * 2 + 1)
|
||||
else:
|
||||
params = {'attribute': 'hosting_port_name', 'value': _name}
|
||||
raise cfg_exc.CSR1kvUnknownValueException(**params)
|
||||
|
||||
def _get_interfaces(self):
|
||||
"""Get a list of interfaces on this hosting device.
|
||||
|
||||
:return: List of the interfaces
|
||||
"""
|
||||
ioscfg = self._get_running_config()
|
||||
parse = ciscoconfparse.CiscoConfParse(ioscfg)
|
||||
intfs_raw = parse.find_lines("^interface GigabitEthernet")
|
||||
intfs = [raw_if.strip().split(' ')[1] for raw_if in intfs_raw]
|
||||
LOG.info(_LI("Interfaces:%s"), intfs)
|
||||
return intfs
|
||||
|
||||
def _get_interface_ip(self, interface_name):
|
||||
"""Get the ip address for an interface.
|
||||
|
||||
:param interface_name: interface_name as a string
|
||||
:return: ip address of interface as a string
|
||||
"""
|
||||
ioscfg = self._get_running_config()
|
||||
parse = ciscoconfparse.CiscoConfParse(ioscfg)
|
||||
children = parse.find_children("^interface %s" % interface_name)
|
||||
for line in children:
|
||||
if 'ip address' in line:
|
||||
ip_address = line.strip().split(' ')[2]
|
||||
LOG.info(_LI("IP Address:%s"), ip_address)
|
||||
return ip_address
|
||||
LOG.warning(_LW("Cannot find interface: %s"), interface_name)
|
||||
return None
|
||||
|
||||
def _interface_exists(self, interface):
|
||||
"""Check whether interface exists."""
|
||||
ioscfg = self._get_running_config()
|
||||
parse = ciscoconfparse.CiscoConfParse(ioscfg)
|
||||
intfs_raw = parse.find_lines("^interface " + interface)
|
||||
return len(intfs_raw) > 0
|
||||
|
||||
def _enable_intfs(self, conn):
|
||||
"""Enable the interfaces of a CSR1kv Virtual Router.
|
||||
|
||||
When the virtual router first boots up, all interfaces except
|
||||
management are down. This method will enable all data interfaces.
|
||||
|
||||
Note: In CSR1kv, GigabitEthernet 0 is not present. GigabitEthernet 1
|
||||
is used as management and GigabitEthernet 2 and up are used for data.
|
||||
This might change in future releases.
|
||||
|
||||
Currently only the second and third Gig interfaces corresponding to a
|
||||
single (T1,T2) pair and configured as trunk for VLAN and VXLAN
|
||||
is enabled.
|
||||
|
||||
:param conn: Connection object
|
||||
:return: True or False
|
||||
"""
|
||||
|
||||
#ToDo(Hareesh): Interfaces are hard coded for now. Make it dynamic.
|
||||
interfaces = ['GigabitEthernet 2', 'GigabitEthernet 3']
|
||||
try:
|
||||
for i in interfaces:
|
||||
confstr = snippets.ENABLE_INTF % i
|
||||
rpc_obj = conn.edit_config(target='running', config=confstr)
|
||||
if self._check_response(rpc_obj, 'ENABLE_INTF'):
|
||||
LOG.info(_LI("Enabled interface %s "), i)
|
||||
time.sleep(1)
|
||||
except Exception:
|
||||
return False
|
||||
return True
|
||||
|
||||
def _get_vrfs(self):
|
||||
"""Get the current VRFs configured in the device.
|
||||
|
||||
:return: A list of vrf names as string
|
||||
"""
|
||||
vrfs = []
|
||||
ioscfg = self._get_running_config()
|
||||
parse = ciscoconfparse.CiscoConfParse(ioscfg)
|
||||
vrfs_raw = parse.find_lines("^ip vrf")
|
||||
for line in vrfs_raw:
|
||||
# raw format ['ip vrf <vrf-name>',....]
|
||||
vrf_name = line.strip().split(' ')[2]
|
||||
vrfs.append(vrf_name)
|
||||
LOG.info(_LI("VRFs:%s"), vrfs)
|
||||
return vrfs
|
||||
|
||||
def _get_capabilities(self):
|
||||
"""Get the servers NETCONF capabilities.
|
||||
|
||||
:return: List of server capabilities.
|
||||
"""
|
||||
conn = self._get_connection()
|
||||
capabilities = []
|
||||
for c in conn.server_capabilities:
|
||||
capabilities.append(c)
|
||||
LOG.debug("Server capabilities: %s", capabilities)
|
||||
return capabilities
|
||||
|
||||
def _get_running_config(self):
|
||||
"""Get the CSR's current running config.
|
||||
|
||||
:return: Current IOS running config as multiline string
|
||||
"""
|
||||
conn = self._get_connection()
|
||||
config = conn.get_config(source="running")
|
||||
if config:
|
||||
root = ET.fromstring(config._raw)
|
||||
running_config = root[0][0]
|
||||
rgx = re.compile("\r*\n+")
|
||||
ioscfg = rgx.split(running_config.text)
|
||||
return ioscfg
|
||||
|
||||
def _check_acl(self, acl_no, network, netmask):
|
||||
"""Check a ACL config exists in the running config.
|
||||
|
||||
:param acl_no: access control list (ACL) number
|
||||
:param network: network which this ACL permits
|
||||
:param netmask: netmask of the network
|
||||
:return:
|
||||
"""
|
||||
exp_cfg_lines = ['ip access-list standard ' + str(acl_no),
|
||||
' permit ' + str(network) + ' ' + str(netmask)]
|
||||
ioscfg = self._get_running_config()
|
||||
parse = ciscoconfparse.CiscoConfParse(ioscfg)
|
||||
acls_raw = parse.find_children(exp_cfg_lines[0])
|
||||
if acls_raw:
|
||||
if exp_cfg_lines[1] in acls_raw:
|
||||
return True
|
||||
LOG.error(_LE("Mismatch in ACL configuration for %s"), acl_no)
|
||||
return False
|
||||
LOG.debug("%s is not present in config", acl_no)
|
||||
return False
|
||||
|
||||
def _cfg_exists(self, cfg_str):
|
||||
"""Check a partial config string exists in the running config.
|
||||
|
||||
:param cfg_str: config string to check
|
||||
:return : True or False
|
||||
"""
|
||||
ioscfg = self._get_running_config()
|
||||
parse = ciscoconfparse.CiscoConfParse(ioscfg)
|
||||
cfg_raw = parse.find_lines("^" + cfg_str)
|
||||
LOG.debug("_cfg_exists(): Found lines %s", cfg_raw)
|
||||
return len(cfg_raw) > 0
|
||||
|
||||
def _set_interface(self, name, ip_address, mask):
|
||||
conn = self._get_connection()
|
||||
confstr = snippets.SET_INTC % (name, ip_address, mask)
|
||||
rpc_obj = conn.edit_config(target='running', config=confstr)
|
||||
self._check_response(rpc_obj, 'SET_INTC')
|
||||
|
||||
def _create_vrf(self, vrf_name):
|
||||
try:
|
||||
conn = self._get_connection()
|
||||
confstr = snippets.CREATE_VRF % vrf_name
|
||||
rpc_obj = conn.edit_config(target='running', config=confstr)
|
||||
if self._check_response(rpc_obj, 'CREATE_VRF'):
|
||||
LOG.info(_LI("VRF %s successfully created"), vrf_name)
|
||||
except Exception:
|
||||
LOG.exception(_LE("Failed creating VRF %s"), vrf_name)
|
||||
|
||||
def _remove_vrf(self, vrf_name):
|
||||
if vrf_name in self._get_vrfs():
|
||||
conn = self._get_connection()
|
||||
confstr = snippets.REMOVE_VRF % vrf_name
|
||||
rpc_obj = conn.edit_config(target='running', config=confstr)
|
||||
if self._check_response(rpc_obj, 'REMOVE_VRF'):
|
||||
LOG.info(_LI("VRF %s removed"), vrf_name)
|
||||
else:
|
||||
LOG.warning(_LW("VRF %s not present"), vrf_name)
|
||||
|
||||
def _create_subinterface(self, subinterface, vlan_id, vrf_name, ip, mask):
|
||||
if vrf_name not in self._get_vrfs():
|
||||
LOG.error(_LE("VRF %s not present"), vrf_name)
|
||||
confstr = snippets.CREATE_SUBINTERFACE % (subinterface, vlan_id,
|
||||
vrf_name, ip, mask)
|
||||
self._edit_running_config(confstr, 'CREATE_SUBINTERFACE')
|
||||
|
||||
def _remove_subinterface(self, subinterface):
|
||||
#Optional : verify this is the correct subinterface
|
||||
if self._interface_exists(subinterface):
|
||||
confstr = snippets.REMOVE_SUBINTERFACE % subinterface
|
||||
self._edit_running_config(confstr, 'REMOVE_SUBINTERFACE')
|
||||
|
||||
def _set_ha_HSRP(self, subinterface, vrf_name, priority, group, ip):
|
||||
if vrf_name not in self._get_vrfs():
|
||||
LOG.error(_LE("VRF %s not present"), vrf_name)
|
||||
confstr = snippets.SET_INTC_HSRP % (subinterface, vrf_name, group,
|
||||
priority, group, ip)
|
||||
action = "SET_INTC_HSRP (Group: %s, Priority: % s)" % (group, priority)
|
||||
self._edit_running_config(confstr, action)
|
||||
|
||||
def _remove_ha_HSRP(self, subinterface, group):
|
||||
confstr = snippets.REMOVE_INTC_HSRP % (subinterface, group)
|
||||
action = ("REMOVE_INTC_HSRP (subinterface:%s, Group:%s)"
|
||||
% (subinterface, group))
|
||||
self._edit_running_config(confstr, action)
|
||||
|
||||
def _get_interface_cfg(self, interface):
|
||||
ioscfg = self._get_running_config()
|
||||
parse = ciscoconfparse.CiscoConfParse(ioscfg)
|
||||
return parse.find_children('interface ' + interface)
|
||||
|
||||
def _nat_rules_for_internet_access(self, acl_no, network,
|
||||
netmask,
|
||||
inner_intfc,
|
||||
outer_intfc,
|
||||
vrf_name):
|
||||
"""Configure the NAT rules for an internal network.
|
||||
|
||||
Configuring NAT rules in the CSR1kv is a three step process. First
|
||||
create an ACL for the IP range of the internal network. Then enable
|
||||
dynamic source NATing on the external interface of the CSR for this
|
||||
ACL and VRF of the neutron router. Finally enable NAT on the
|
||||
interfaces of the CSR where the internal and external networks are
|
||||
connected.
|
||||
|
||||
:param acl_no: ACL number of the internal network.
|
||||
:param network: internal network
|
||||
:param netmask: netmask of the internal network.
|
||||
:param inner_intfc: (name of) interface connected to the internal
|
||||
network
|
||||
:param outer_intfc: (name of) interface connected to the external
|
||||
network
|
||||
:param vrf_name: VRF corresponding to this virtual router
|
||||
:return: True if configuration succeeded
|
||||
:raises: neutron.plugins.cisco.cfg_agent.cfg_exceptions.
|
||||
CSR1kvConfigException
|
||||
"""
|
||||
conn = self._get_connection()
|
||||
# Duplicate ACL creation throws error, so checking
|
||||
# it first. Remove it in future as this is not common in production
|
||||
acl_present = self._check_acl(acl_no, network, netmask)
|
||||
if not acl_present:
|
||||
confstr = snippets.CREATE_ACL % (acl_no, network, netmask)
|
||||
rpc_obj = conn.edit_config(target='running', config=confstr)
|
||||
self._check_response(rpc_obj, 'CREATE_ACL')
|
||||
|
||||
confstr = snippets.SET_DYN_SRC_TRL_INTFC % (acl_no, outer_intfc,
|
||||
vrf_name)
|
||||
rpc_obj = conn.edit_config(target='running', config=confstr)
|
||||
self._check_response(rpc_obj, 'CREATE_SNAT')
|
||||
|
||||
confstr = snippets.SET_NAT % (inner_intfc, 'inside')
|
||||
rpc_obj = conn.edit_config(target='running', config=confstr)
|
||||
self._check_response(rpc_obj, 'SET_NAT')
|
||||
|
||||
confstr = snippets.SET_NAT % (outer_intfc, 'outside')
|
||||
rpc_obj = conn.edit_config(target='running', config=confstr)
|
||||
self._check_response(rpc_obj, 'SET_NAT')
|
||||
|
||||
def _add_interface_nat(self, intfc_name, intfc_type):
|
||||
conn = self._get_connection()
|
||||
confstr = snippets.SET_NAT % (intfc_name, intfc_type)
|
||||
rpc_obj = conn.edit_config(target='running', config=confstr)
|
||||
self._check_response(rpc_obj, 'SET_NAT ' + intfc_type)
|
||||
|
||||
def _remove_interface_nat(self, intfc_name, intfc_type):
|
||||
conn = self._get_connection()
|
||||
confstr = snippets.REMOVE_NAT % (intfc_name, intfc_type)
|
||||
rpc_obj = conn.edit_config(target='running', config=confstr)
|
||||
self._check_response(rpc_obj, 'REMOVE_NAT ' + intfc_type)
|
||||
|
||||
def _remove_dyn_nat_rule(self, acl_no, outer_intfc_name, vrf_name):
|
||||
conn = self._get_connection()
|
||||
confstr = snippets.SNAT_CFG % (acl_no, outer_intfc_name, vrf_name)
|
||||
if self._cfg_exists(confstr):
|
||||
confstr = snippets.REMOVE_DYN_SRC_TRL_INTFC % (acl_no,
|
||||
outer_intfc_name,
|
||||
vrf_name)
|
||||
rpc_obj = conn.edit_config(target='running', config=confstr)
|
||||
self._check_response(rpc_obj, 'REMOVE_DYN_SRC_TRL_INTFC')
|
||||
|
||||
confstr = snippets.REMOVE_ACL % acl_no
|
||||
rpc_obj = conn.edit_config(target='running', config=confstr)
|
||||
self._check_response(rpc_obj, 'REMOVE_ACL')
|
||||
|
||||
def _remove_dyn_nat_translations(self):
|
||||
conn = self._get_connection()
|
||||
confstr = snippets.CLEAR_DYN_NAT_TRANS
|
||||
rpc_obj = conn.edit_config(target='running', config=confstr)
|
||||
self._check_response(rpc_obj, 'CLEAR_DYN_NAT_TRANS')
|
||||
|
||||
def _add_floating_ip(self, floating_ip, fixed_ip, vrf):
|
||||
conn = self._get_connection()
|
||||
confstr = snippets.SET_STATIC_SRC_TRL % (fixed_ip, floating_ip, vrf)
|
||||
rpc_obj = conn.edit_config(target='running', config=confstr)
|
||||
self._check_response(rpc_obj, 'SET_STATIC_SRC_TRL')
|
||||
|
||||
def _remove_floating_ip(self, floating_ip, fixed_ip, vrf):
|
||||
conn = self._get_connection()
|
||||
confstr = snippets.REMOVE_STATIC_SRC_TRL % (fixed_ip, floating_ip, vrf)
|
||||
rpc_obj = conn.edit_config(target='running', config=confstr)
|
||||
self._check_response(rpc_obj, 'REMOVE_STATIC_SRC_TRL')
|
||||
|
||||
def _get_floating_ip_cfg(self):
|
||||
ioscfg = self._get_running_config()
|
||||
parse = ciscoconfparse.CiscoConfParse(ioscfg)
|
||||
res = parse.find_lines('ip nat inside source static')
|
||||
return res
|
||||
|
||||
def _add_static_route(self, dest, dest_mask, next_hop, vrf):
|
||||
conn = self._get_connection()
|
||||
confstr = snippets.SET_IP_ROUTE % (vrf, dest, dest_mask, next_hop)
|
||||
rpc_obj = conn.edit_config(target='running', config=confstr)
|
||||
self._check_response(rpc_obj, 'SET_IP_ROUTE')
|
||||
|
||||
def _remove_static_route(self, dest, dest_mask, next_hop, vrf):
|
||||
conn = self._get_connection()
|
||||
confstr = snippets.REMOVE_IP_ROUTE % (vrf, dest, dest_mask, next_hop)
|
||||
rpc_obj = conn.edit_config(target='running', config=confstr)
|
||||
self._check_response(rpc_obj, 'REMOVE_IP_ROUTE')
|
||||
|
||||
def _get_static_route_cfg(self):
|
||||
ioscfg = self._get_running_config()
|
||||
parse = ciscoconfparse.CiscoConfParse(ioscfg)
|
||||
return parse.find_lines('ip route')
|
||||
|
||||
def _add_default_static_route(self, gw_ip, vrf):
|
||||
conn = self._get_connection()
|
||||
confstr = snippets.DEFAULT_ROUTE_CFG % (vrf, gw_ip)
|
||||
if not self._cfg_exists(confstr):
|
||||
confstr = snippets.SET_DEFAULT_ROUTE % (vrf, gw_ip)
|
||||
rpc_obj = conn.edit_config(target='running', config=confstr)
|
||||
self._check_response(rpc_obj, 'SET_DEFAULT_ROUTE')
|
||||
|
||||
def _remove_default_static_route(self, gw_ip, vrf):
|
||||
conn = self._get_connection()
|
||||
confstr = snippets.DEFAULT_ROUTE_CFG % (vrf, gw_ip)
|
||||
if self._cfg_exists(confstr):
|
||||
confstr = snippets.REMOVE_DEFAULT_ROUTE % (vrf, gw_ip)
|
||||
rpc_obj = conn.edit_config(target='running', config=confstr)
|
||||
self._check_response(rpc_obj, 'REMOVE_DEFAULT_ROUTE')
|
||||
|
||||
def _edit_running_config(self, confstr, snippet):
|
||||
conn = self._get_connection()
|
||||
rpc_obj = conn.edit_config(target='running', config=confstr)
|
||||
self._check_response(rpc_obj, snippet)
|
||||
|
||||
@staticmethod
|
||||
def _check_response(rpc_obj, snippet_name):
|
||||
"""This function checks the rpc response object for status.
|
||||
|
||||
This function takes as input the response rpc_obj and the snippet name
|
||||
that was executed. It parses the reply to determine whether the last
|
||||
edit operation was a success or not.
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<rpc-reply message-id="urn:uuid:81bf8082-....-b69a-000c29e1b85c"
|
||||
xmlns="urn:ietf:params:netconf:base:1.0">
|
||||
<ok />
|
||||
</rpc-reply>
|
||||
In case of an error, the CSR1kv sends a response as follows.
|
||||
We extract the error type and tag from it.
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<rpc-reply message-id="urn:uuid:81bf8082-....-b69a-000c29e1b85c"
|
||||
xmlns="urn:ietf:params:netconf:base:1.0">
|
||||
<rpc-error>
|
||||
<error-type>protocol</error-type>
|
||||
<error-tag>operation-failed</error-tag>
|
||||
<error-severity>error</error-severity>
|
||||
</rpc-error>
|
||||
</rpc-reply>
|
||||
:return: True if the config operation completed successfully
|
||||
:raises: neutron.plugins.cisco.cfg_agent.cfg_exceptions.
|
||||
CSR1kvConfigException
|
||||
"""
|
||||
LOG.debug("RPCReply for %(snippet_name)s is %(rpc_obj)s",
|
||||
{'snippet_name': snippet_name, 'rpc_obj': rpc_obj.xml})
|
||||
xml_str = rpc_obj.xml
|
||||
if "<ok />" in xml_str:
|
||||
LOG.debug("RPCReply for %s is OK", snippet_name)
|
||||
LOG.info(_LI("%s successfully executed"), snippet_name)
|
||||
return True
|
||||
# Not OK; raise a CSR1kvConfigException
|
||||
e_type = rpc_obj._root[0][0].text
|
||||
e_tag = rpc_obj._root[0][1].text
|
||||
params = {'snippet': snippet_name, 'type': e_type, 'tag': e_tag}
|
||||
raise cfg_exc.CSR1kvConfigException(**params)
|
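# --- Illustrative sketch (editor's addition, not part of the removed file) ---
# The _check_response() docstring above shows the two <rpc-reply> shapes the
# CSR1kv returns over NETCONF. A minimal, standalone way to classify such a
# reply with only the standard library could look like this; the namespace is
# taken from the examples above, while the function name and return convention
# are assumptions for illustration only.
import xml.etree.ElementTree as ET

NETCONF_NS = '{urn:ietf:params:netconf:base:1.0}'

def classify_netconf_reply(xml_str):
    """Return (True, None, None) for an <ok/> reply, else (False, type, tag)."""
    root = ET.fromstring(xml_str)
    if root.find(NETCONF_NS + 'ok') is not None:
        return True, None, None
    error = root.find(NETCONF_NS + 'rpc-error')
    e_type = error.findtext(NETCONF_NS + 'error-type') if error is not None else None
    e_tag = error.findtext(NETCONF_NS + 'error-tag') if error is not None else None
    return False, e_type, e_tag

# e.g. classify_netconf_reply(rpc_obj.xml) mirrors the "<ok />" substring check
# and the error type/tag extraction performed in _check_response() above.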
@ -1,158 +0,0 @@
|
||||
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import abc
|
||||
import six
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class RoutingDriverBase(object):
|
||||
"""Base class that defines an abstract interface for the Routing Driver.
|
||||
|
||||
This class defines the abstract interface/API for the Routing and
|
||||
NAT related operations. A driver class corresponding to a hosting device
|
||||
should inherit this base driver and implement its methods.
|
||||
RouterInfo object (neutron.plugins.cisco.cfg_agent.router_info.RouterInfo)
|
||||
is a wrapper around the router dictionary, with attributes for easy access
|
||||
to parameters.
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def router_added(self, router_info):
|
||||
"""A logical router was assigned to the hosting device.
|
||||
|
||||
:param router_info: RouterInfo object for this router
|
||||
:return None
|
||||
"""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def router_removed(self, router_info):
|
||||
"""A logical router was de-assigned from the hosting device.
|
||||
|
||||
:param router_info: RouterInfo object for this router
|
||||
:return None
|
||||
"""
|
||||
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def internal_network_added(self, router_info, port):
|
||||
"""An internal network was connected to a router.
|
||||
|
||||
:param router_info: RouterInfo object for this router
|
||||
:param port : port dictionary for the port where the internal
|
||||
network is connected
|
||||
:return None
|
||||
"""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def internal_network_removed(self, router_info, port):
|
||||
"""An internal network was removed from a router.
|
||||
|
||||
:param router_info: RouterInfo object for this router
|
||||
:param port : port dictionary for the port where the internal
|
||||
network was connected
|
||||
:return None
|
||||
"""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def external_gateway_added(self, router_info, ex_gw_port):
|
||||
"""An external network was added to a router.
|
||||
|
||||
:param router_info: RouterInfo object of the router
|
||||
:param ex_gw_port : port dictionary for the port where the external
|
||||
gateway network is connected
|
||||
:return None
|
||||
"""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def external_gateway_removed(self, router_info, ex_gw_port):
|
||||
"""An external network was removed from the router.
|
||||
|
||||
:param router_info: RouterInfo object of the router
|
||||
:param ex_gw_port : port dictionary for the port where the external
|
||||
gateway network was connected
|
||||
:return None
|
||||
"""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def enable_internal_network_NAT(self, router_info, port, ex_gw_port):
|
||||
"""Enable NAT on an internal network.
|
||||
|
||||
:param router_info: RouterInfo object for this router
|
||||
:param port : port dictionary for the port where the internal
|
||||
network is connected
|
||||
:param ex_gw_port : port dictionary for the port where the external
|
||||
gateway network is connected
|
||||
:return None
|
||||
"""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def disable_internal_network_NAT(self, router_info, port, ex_gw_port):
|
||||
"""Disable NAT on an internal network.
|
||||
|
||||
:param router_info: RouterInfo object for this router
|
||||
:param port : port dictionary for the port where the internal
|
||||
network is connected
|
||||
:param ex_gw_port : port dictionary for the port where the external
|
||||
gateway network is connected
|
||||
:return None
|
||||
"""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def floating_ip_added(self, router_info, ex_gw_port,
|
||||
floating_ip, fixed_ip):
|
||||
"""A floating IP was added.
|
||||
|
||||
:param router_info: RouterInfo object for this router
|
||||
:param ex_gw_port : port dictionary for the port where the external
|
||||
gateway network is connected
|
||||
:param floating_ip: Floating IP as a string
|
||||
:param fixed_ip : Fixed IP of the internal interface as
|
||||
a string
|
||||
:return None
|
||||
"""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def floating_ip_removed(self, router_info, ex_gw_port,
|
||||
floating_ip, fixed_ip):
|
||||
"""A floating IP was removed.
|
||||
|
||||
:param router_info: RouterInfo object for this router
|
||||
:param ex_gw_port : port dictionary for the port where the external
|
||||
gateway network is connected
|
||||
:param floating_ip: Floating IP as a string
|
||||
:param fixed_ip: Fixed IP of the internal interface as a string
|
||||
:return None
|
||||
"""
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def routes_updated(self, router_info, action, route):
|
||||
"""Routes were updated for router.
|
||||
|
||||
:param router_info: RouterInfo object for this router
|
||||
:param action : Action on the route, either 'replace' or 'delete'
|
||||
:param route: route dictionary with keys 'destination' & 'next_hop'
|
||||
:return None
|
||||
"""
|
||||
pass
|
@ -1,99 +0,0 @@
|
||||
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_utils import excutils
|
||||
from oslo_utils import importutils
|
||||
|
||||
from neutron.i18n import _LE
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.cisco.cfg_agent import cfg_exceptions
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DeviceDriverManager(object):
|
||||
"""This class acts as a manager for device drivers.
|
||||
|
||||
The device driver manager maintains the relationship between the
|
||||
different neutron logical resources (e.g. routers, firewalls, vpns etc.) and
|
||||
where they are hosted. For configuring a logical resource (router) in a
|
||||
hosting device, a corresponding device driver object is used.
|
||||
Device drivers encapsulate the necessary configuration information to
|
||||
configure a logical resource (eg: routers, firewalls, vpns etc.) on a
|
||||
hosting device (eg: CSR1kv).
|
||||
|
||||
The device driver manager loads one driver object per hosting device.
|
||||
The loaded drivers are cached in memory, so when a request is made to
|
||||
get driver object for the same hosting device and resource (like router),
|
||||
the existing driver object is reused.
|
||||
|
||||
This class is used by the service helper classes.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self._drivers = {}
|
||||
self._hosting_device_routing_drivers_binding = {}
|
||||
|
||||
def get_driver(self, resource_id):
|
||||
try:
|
||||
return self._drivers[resource_id]
|
||||
except KeyError:
|
||||
with excutils.save_and_reraise_exception(reraise=False):
|
||||
raise cfg_exceptions.DriverNotFound(id=resource_id)
|
||||
|
||||
def set_driver(self, resource):
|
||||
"""Set the driver for a neutron resource.
|
||||
|
||||
:param resource: Neutron resource in dict format. Expected keys:
|
||||
{ 'id': <value>
|
||||
'hosting_device': { 'id': <value>, }
|
||||
'router_type': {'cfg_agent_driver': <value>, }
|
||||
}
|
||||
:return driver : driver object
|
||||
"""
|
||||
try:
|
||||
resource_id = resource['id']
|
||||
hosting_device = resource['hosting_device']
|
||||
hd_id = hosting_device['id']
|
||||
if hd_id in self._hosting_device_routing_drivers_binding:
|
||||
driver = self._hosting_device_routing_drivers_binding[hd_id]
|
||||
self._drivers[resource_id] = driver
|
||||
else:
|
||||
driver_class = resource['router_type']['cfg_agent_driver']
|
||||
driver = importutils.import_object(driver_class,
|
||||
**hosting_device)
|
||||
self._hosting_device_routing_drivers_binding[hd_id] = driver
|
||||
self._drivers[resource_id] = driver
|
||||
return driver
|
||||
except ImportError:
|
||||
with excutils.save_and_reraise_exception(reraise=False):
|
||||
LOG.exception(_LE("Error loading cfg agent driver %(driver)s "
|
||||
"for hosting device template "
|
||||
"%(t_name)s(%(t_id)s)"),
|
||||
{'driver': driver_class, 't_id': hd_id,
|
||||
't_name': resource['name']})
|
||||
raise cfg_exceptions.DriverNotExist(driver=driver_class)
|
||||
except KeyError as e:
|
||||
with excutils.save_and_reraise_exception(reraise=False):
|
||||
raise cfg_exceptions.DriverNotSetForMissingParameter(e)
|
||||
|
||||
def remove_driver(self, resource_id):
|
||||
"""Remove driver associated to a particular resource."""
|
||||
if resource_id in self._drivers:
|
||||
del self._drivers[resource_id]
|
||||
|
||||
def remove_driver_for_hosting_device(self, hd_id):
|
||||
"""Remove driver associated to a particular hosting device."""
|
||||
if hd_id in self._hosting_device_routing_drivers_binding:
|
||||
del self._hosting_device_routing_drivers_binding[hd_id]
|
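# --- Illustrative sketch (editor's addition, not part of the removed file) ---
# Expected driver lifecycle, based on the docstrings above. The router dict
# follows the shape documented in set_driver(); all literal values and the
# helper name are made up for illustration.
def _example_driver_lifecycle(driver_manager, router):
    # router is assumed to carry 'id', 'hosting_device': {'id': ...} and
    # 'router_type': {'cfg_agent_driver': <importable class path>}
    driver = driver_manager.set_driver(router)
    # subsequent lookups by resource id reuse the cached driver, and routers
    # on the same hosting device share one driver object
    assert driver_manager.get_driver(router['id']) is driver
    # clean up per-resource and per-hosting-device state
    driver_manager.remove_driver(router['id'])
    driver_manager.remove_driver_for_hosting_device(
        router['hosting_device']['id'])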
@ -1,77 +0,0 @@
|
||||
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import logging
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from neutron.plugins.cisco.cfg_agent.device_drivers import devicedriver_api
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DummyRoutingDriver(devicedriver_api.RoutingDriverBase):
|
||||
"""Dummy Routing Driver.
|
||||
|
||||
This class emulates a routing driver without a real backing device.
|
||||
"""
|
||||
|
||||
def __init__(self, **device_params):
|
||||
my_device_params = device_params
|
||||
# Datetime values cause JSON decoding errors, so remove them locally
|
||||
if my_device_params.get('created_at'):
|
||||
del my_device_params['created_at']
|
||||
LOG.debug(jsonutils.dumps(my_device_params, sort_keys=True, indent=4))
|
||||
|
||||
###### Public Functions ########
|
||||
def router_added(self, ri):
|
||||
LOG.debug("DummyDriver router_added() called.")
|
||||
|
||||
def router_removed(self, ri):
|
||||
LOG.debug("DummyDriver router_removed() called.")
|
||||
|
||||
def internal_network_added(self, ri, port):
|
||||
LOG.debug("DummyDriver internal_network_added() called.")
|
||||
LOG.debug("Int port data: " + jsonutils.dumps(port, sort_keys=True,
|
||||
indent=4))
|
||||
|
||||
def internal_network_removed(self, ri, port):
|
||||
LOG.debug("DummyDriver internal_network_removed() called.")
|
||||
|
||||
def external_gateway_added(self, ri, ex_gw_port):
|
||||
LOG.debug("DummyDriver external_gateway_added() called.")
|
||||
LOG.debug("Ext port data: " + jsonutils.dumps(ex_gw_port,
|
||||
sort_keys=True,
|
||||
indent=4))
|
||||
|
||||
def external_gateway_removed(self, ri, ex_gw_port):
|
||||
LOG.debug("DummyDriver external_gateway_removed() called.")
|
||||
|
||||
def enable_internal_network_NAT(self, ri, port, ex_gw_port):
|
||||
LOG.debug("DummyDriver enable_internal_network_NAT() called.")
|
||||
|
||||
def disable_internal_network_NAT(self, ri, port, ex_gw_port):
|
||||
LOG.debug("DummyDriver disable_internal_network_NAT() called.")
|
||||
|
||||
def floating_ip_added(self, ri, ex_gw_port, floating_ip, fixed_ip):
|
||||
LOG.debug("DummyDriver floating_ip_added() called.")
|
||||
|
||||
def floating_ip_removed(self, ri, ex_gw_port, floating_ip, fixed_ip):
|
||||
LOG.debug("DummyDriver floating_ip_removed() called.")
|
||||
|
||||
def routes_updated(self, ri, action, route):
|
||||
LOG.debug("DummyDriver routes_updated() called.")
|
||||
|
||||
def clear_connection(self):
|
||||
LOG.debug("DummyDriver clear_connection() called.")
|
@ -1,175 +0,0 @@
|
||||
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import datetime
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import timeutils
|
||||
|
||||
from neutron.agent.linux import utils as linux_utils
|
||||
from neutron.i18n import _LI, _LW
|
||||
from neutron.openstack.common import log as logging
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
STATUS_OPTS = [
|
||||
cfg.IntOpt('device_connection_timeout', default=30,
|
||||
help=_("Time in seconds for connecting to a hosting device")),
|
||||
cfg.IntOpt('hosting_device_dead_timeout', default=300,
|
||||
help=_("The time in seconds until a backlogged hosting device "
|
||||
"is presumed dead. This value should be set high "
|
||||
"enough to recover from a period of connectivity loss "
|
||||
"or high load when the device may not be responding.")),
|
||||
]
|
||||
|
||||
cfg.CONF.register_opts(STATUS_OPTS, "cfg_agent")
|
||||
|
||||
|
||||
def _is_pingable(ip):
|
||||
"""Checks whether an IP address is reachable by pinging.
|
||||
|
||||
Use linux utils to execute the ping (ICMP ECHO) command.
|
||||
Sends 5 packets with an interval of 0.2 seconds and a timeout of 1
|
||||
second. A runtime error implies unreachability; otherwise the IP is pingable.
|
||||
:param ip: IP to check
|
||||
:return: bool - True or False depending on pingability.
|
||||
"""
|
||||
ping_cmd = ['ping',
|
||||
'-c', '5',
|
||||
'-W', '1',
|
||||
'-i', '0.2',
|
||||
ip]
|
||||
try:
|
||||
linux_utils.execute(ping_cmd, check_exit_code=True)
|
||||
return True
|
||||
except RuntimeError:
|
||||
LOG.warning(_LW("Cannot ping ip address: %s"), ip)
|
||||
return False
|
||||
|
||||
|
||||
class DeviceStatus(object):
|
||||
"""Device status and backlog processing."""
|
||||
|
||||
_instance = None
|
||||
|
||||
def __new__(cls):
|
||||
if not cls._instance:
|
||||
cls._instance = super(DeviceStatus, cls).__new__(cls)
|
||||
return cls._instance
|
||||
|
||||
def __init__(self):
|
||||
self.backlog_hosting_devices = {}
|
||||
|
||||
def get_backlogged_hosting_devices(self):
|
||||
return self.backlog_hosting_devices.keys()
|
||||
|
||||
def get_backlogged_hosting_devices_info(self):
|
||||
wait_time = datetime.timedelta(
|
||||
seconds=cfg.CONF.cfg_agent.hosting_device_dead_timeout)
|
||||
resp = []
|
||||
for hd_id in self.backlog_hosting_devices:
|
||||
hd = self.backlog_hosting_devices[hd_id]['hd']
|
||||
created_time = hd['created_at']
|
||||
boottime = datetime.timedelta(seconds=hd['booting_time'])
|
||||
backlogged_at = hd['backlog_insertion_ts']
|
||||
booted_at = created_time + boottime
|
||||
dead_at = backlogged_at + wait_time
|
||||
resp.append({'host id': hd['id'],
|
||||
'created at': str(created_time),
|
||||
'backlogged at': str(backlogged_at),
|
||||
'estimate booted at': str(booted_at),
|
||||
'considered dead at': str(dead_at)})
|
||||
return resp
|
||||
|
||||
def is_hosting_device_reachable(self, hosting_device):
|
||||
"""Check whether the hosting device hosting this resource is reachable.
|
||||
|
||||
If the hosting device is not reachable, it is added to the backlog.
|
||||
|
||||
:param hosting_device : dict of the hosting device
|
||||
:return True if device is reachable, else None
|
||||
"""
|
||||
hd = hosting_device
|
||||
hd_id = hosting_device['id']
|
||||
hd_mgmt_ip = hosting_device['management_ip_address']
|
||||
# Convert the 'created_at' string to a datetime object
|
||||
hosting_device['created_at'] = datetime.datetime.strptime(
|
||||
hosting_device['created_at'], '%Y-%m-%d %H:%M:%S')
|
||||
|
||||
if hd_id not in self.backlog_hosting_devices:
|
||||
if _is_pingable(hd_mgmt_ip):
|
||||
LOG.debug("Hosting device: %(hd_id)s@%(ip)s is reachable.",
|
||||
{'hd_id': hd_id, 'ip': hd_mgmt_ip})
|
||||
return True
|
||||
LOG.debug("Hosting device: %(hd_id)s@%(ip)s is NOT reachable.",
|
||||
{'hd_id': hd_id, 'ip': hd_mgmt_ip})
|
||||
hd['backlog_insertion_ts'] = max(
|
||||
timeutils.utcnow(),
|
||||
hd['created_at'] +
|
||||
datetime.timedelta(seconds=hd['booting_time']))
|
||||
self.backlog_hosting_devices[hd_id] = {'hd': hd}
|
||||
LOG.debug("Hosting device: %(hd_id)s @ %(ip)s is now added "
|
||||
"to backlog", {'hd_id': hd_id, 'ip': hd_mgmt_ip})
|
||||
|
||||
def check_backlogged_hosting_devices(self):
|
||||
"""Checks the status of backlogged hosting devices.
|
||||
|
||||
Skips newly spun up instances during their booting time as specified
|
||||
in the boot time parameter.
|
||||
|
||||
:return A dict of the format:
|
||||
{'reachable': [<hd_id>,..], 'dead': [<hd_id>,..]}
|
||||
"""
|
||||
response_dict = {'reachable': [], 'dead': []}
|
||||
LOG.debug("Current Backlogged hosting devices: %s",
|
||||
self.backlog_hosting_devices.keys())
|
||||
for hd_id in self.backlog_hosting_devices.keys():
|
||||
hd = self.backlog_hosting_devices[hd_id]['hd']
|
||||
if not timeutils.is_older_than(hd['created_at'],
|
||||
hd['booting_time']):
|
||||
LOG.info(_LI("Hosting device: %(hd_id)s @ %(ip)s hasn't "
|
||||
"passed minimum boot time. Skipping it. "),
|
||||
{'hd_id': hd_id, 'ip': hd['management_ip_address']})
|
||||
continue
|
||||
LOG.info(_LI("Checking hosting device: %(hd_id)s @ %(ip)s for "
|
||||
"reachability."), {'hd_id': hd_id,
|
||||
'ip': hd['management_ip_address']})
|
||||
if _is_pingable(hd['management_ip_address']):
|
||||
hd.pop('backlog_insertion_ts', None)
|
||||
del self.backlog_hosting_devices[hd_id]
|
||||
response_dict['reachable'].append(hd_id)
|
||||
LOG.info(_LI("Hosting device: %(hd_id)s @ %(ip)s is now "
|
||||
"reachable. Adding it to response"),
|
||||
{'hd_id': hd_id, 'ip': hd['management_ip_address']})
|
||||
else:
|
||||
LOG.info(_LI("Hosting device: %(hd_id)s @ %(ip)s still not "
|
||||
"reachable "), {'hd_id': hd_id,
|
||||
'ip': hd['management_ip_address']})
|
||||
if timeutils.is_older_than(
|
||||
hd['backlog_insertion_ts'],
|
||||
cfg.CONF.cfg_agent.hosting_device_dead_timeout):
|
||||
LOG.debug("Hosting device: %(hd_id)s @ %(ip)s hasn't "
|
||||
"been reachable for the last %(time)d seconds. "
|
||||
"Marking it dead.",
|
||||
{'hd_id': hd_id,
|
||||
'ip': hd['management_ip_address'],
|
||||
'time': cfg.CONF.cfg_agent.
|
||||
hosting_device_dead_timeout})
|
||||
response_dict['dead'].append(hd_id)
|
||||
hd.pop('backlog_insertion_ts', None)
|
||||
del self.backlog_hosting_devices[hd_id]
|
||||
LOG.debug("Response: %s", response_dict)
|
||||
return response_dict
|
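# --- Illustrative sketch (editor's addition, not part of the removed file) ---
# How a caller is expected to drive the backlog above: a failed reachability
# check backlogs the device, and periodic calls to
# check_backlogged_hosting_devices() report devices that became reachable or
# are presumed dead. The hosting_device dict keys follow
# is_hosting_device_reachable(); the callback names are assumptions.
def _example_backlog_cycle(dev_status, hosting_device, on_reachable, on_dead):
    if not dev_status.is_hosting_device_reachable(hosting_device):
        # the device was backlogged; poll the backlog periodically
        result = dev_status.check_backlogged_hosting_devices()
        for hd_id in result['reachable']:
            on_reachable(hd_id)
        for hd_id in result['dead']:
            on_dead(hd_id)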
@ -1,635 +0,0 @@
|
||||
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import collections
|
||||
|
||||
import eventlet
|
||||
import netaddr
|
||||
import oslo_messaging
|
||||
from oslo_utils import excutils
|
||||
|
||||
from neutron.common import constants as l3_constants
|
||||
from neutron.common import rpc as n_rpc
|
||||
from neutron.common import topics
|
||||
from neutron.common import utils as common_utils
|
||||
from neutron import context as n_context
|
||||
from neutron.i18n import _LE, _LI, _LW
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.cisco.cfg_agent import cfg_exceptions
|
||||
from neutron.plugins.cisco.cfg_agent.device_drivers import driver_mgr
|
||||
from neutron.plugins.cisco.cfg_agent import device_status
|
||||
from neutron.plugins.cisco.common import cisco_constants as c_constants
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
N_ROUTER_PREFIX = 'nrouter-'
|
||||
|
||||
|
||||
class RouterInfo(object):
|
||||
"""Wrapper class around the (neutron) router dictionary.
|
||||
|
||||
Information about the neutron router is exchanged as a python dictionary
|
||||
between plugin and config agent. RouterInfo is a wrapper around that dict,
|
||||
with attributes for common parameters. These attributes keep the state
|
||||
of the current router configuration, and are used for detecting router
|
||||
state changes when an updated router dict is received.
|
||||
|
||||
This is a modified version of the RouterInfo class defined in the
|
||||
(reference) l3-agent implementation, for use with cisco config agent.
|
||||
"""
|
||||
|
||||
def __init__(self, router_id, router):
|
||||
self.router_id = router_id
|
||||
self.ex_gw_port = None
|
||||
self._snat_enabled = None
|
||||
self._snat_action = None
|
||||
self.internal_ports = []
|
||||
self.floating_ips = []
|
||||
self._router = None
|
||||
self.router = router
|
||||
self.routes = []
|
||||
self.ha_info = router.get('ha_info')
|
||||
|
||||
@property
|
||||
def router(self):
|
||||
return self._router
|
||||
|
||||
@property
|
||||
def id(self):
|
||||
return self.router_id
|
||||
|
||||
@property
|
||||
def snat_enabled(self):
|
||||
return self._snat_enabled
|
||||
|
||||
@router.setter
|
||||
def router(self, value):
|
||||
self._router = value
|
||||
if not self._router:
|
||||
return
|
||||
# enable_snat by default if it wasn't specified by plugin
|
||||
self._snat_enabled = self._router.get('enable_snat', True)
|
||||
|
||||
def router_name(self):
|
||||
return N_ROUTER_PREFIX + self.router_id
|
||||
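# --- Illustrative sketch (editor's addition, not part of the removed file) ---
# Minimal example of the wrapper above with a made-up router dict: the router
# setter defaults snat_enabled to True when 'enable_snat' is absent, and
# router_name() derives the name used on the hosting device.
def _example_router_info():
    router = {'id': 'r-uuid', 'ha_info': None}
    ri = RouterInfo('r-uuid', router)
    assert ri.snat_enabled is True
    assert ri.router_name() == N_ROUTER_PREFIX + 'r-uuid'
    return ri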
|
||||
|
||||
class CiscoRoutingPluginApi(object):
|
||||
"""RoutingServiceHelper(Agent) side of the routing RPC API."""
|
||||
|
||||
def __init__(self, topic, host):
|
||||
self.host = host
|
||||
target = oslo_messaging.Target(topic=topic, version='1.0')
|
||||
self.client = n_rpc.get_client(target)
|
||||
|
||||
def get_routers(self, context, router_ids=None, hd_ids=None):
|
||||
"""Make a remote process call to retrieve the sync data for routers.
|
||||
|
||||
:param context: session context
|
||||
:param router_ids: list of routers to fetch
|
||||
:param hd_ids : hosting device ids, only routers assigned to these
|
||||
hosting devices will be returned.
|
||||
"""
|
||||
cctxt = self.client.prepare(version='1.1')
|
||||
return cctxt.call(context, 'cfg_sync_routers', host=self.host,
|
||||
router_ids=router_ids, hosting_device_ids=hd_ids)
|
||||
|
||||
|
||||
class RoutingServiceHelper(object):
|
||||
|
||||
def __init__(self, host, conf, cfg_agent):
|
||||
self.conf = conf
|
||||
self.cfg_agent = cfg_agent
|
||||
self.context = n_context.get_admin_context_without_session()
|
||||
self.plugin_rpc = CiscoRoutingPluginApi(topics.L3PLUGIN, host)
|
||||
self._dev_status = device_status.DeviceStatus()
|
||||
self._drivermgr = driver_mgr.DeviceDriverManager()
|
||||
|
||||
self.router_info = {}
|
||||
self.updated_routers = set()
|
||||
self.removed_routers = set()
|
||||
self.sync_devices = set()
|
||||
self.fullsync = True
|
||||
self.topic = '%s.%s' % (c_constants.CFG_AGENT_L3_ROUTING, host)
|
||||
|
||||
self._setup_rpc()
|
||||
|
||||
def _setup_rpc(self):
|
||||
self.conn = n_rpc.create_connection(new=True)
|
||||
self.endpoints = [self]
|
||||
self.conn.create_consumer(self.topic, self.endpoints, fanout=False)
|
||||
self.conn.consume_in_threads()
|
||||
|
||||
### Notifications from Plugin ####
|
||||
|
||||
def router_deleted(self, context, routers):
|
||||
"""Deal with router deletion RPC message."""
|
||||
LOG.debug('Got router deleted notification for %s', routers)
|
||||
self.removed_routers.update(routers)
|
||||
|
||||
def routers_updated(self, context, routers):
|
||||
"""Deal with routers modification and creation RPC message."""
|
||||
LOG.debug('Got routers updated notification :%s', routers)
|
||||
if routers:
|
||||
# This is needed for backward compatibility
|
||||
if isinstance(routers[0], dict):
|
||||
routers = [router['id'] for router in routers]
|
||||
self.updated_routers.update(routers)
|
||||
|
||||
def router_removed_from_agent(self, context, payload):
|
||||
LOG.debug('Got router removed from agent :%r', payload)
|
||||
self.removed_routers.add(payload['router_id'])
|
||||
|
||||
def router_added_to_agent(self, context, payload):
|
||||
LOG.debug('Got router added to agent :%r', payload)
|
||||
self.routers_updated(context, payload)
|
||||
|
||||
# Routing service helper public methods
|
||||
|
||||
def process_service(self, device_ids=None, removed_devices_info=None):
|
||||
try:
|
||||
LOG.debug("Routing service processing started")
|
||||
resources = {}
|
||||
routers = []
|
||||
removed_routers = []
|
||||
all_routers_flag = False
|
||||
if self.fullsync:
|
||||
LOG.debug("FullSync flag is on. Starting fullsync")
|
||||
# Set all_routers_flag and clear the global fullsync flag
|
||||
all_routers_flag = True
|
||||
self.fullsync = False
|
||||
self.updated_routers.clear()
|
||||
self.removed_routers.clear()
|
||||
self.sync_devices.clear()
|
||||
routers = self._fetch_router_info(all_routers=True)
|
||||
else:
|
||||
if self.updated_routers:
|
||||
router_ids = list(self.updated_routers)
|
||||
LOG.debug("Updated routers:%s", router_ids)
|
||||
self.updated_routers.clear()
|
||||
routers = self._fetch_router_info(router_ids=router_ids)
|
||||
if device_ids:
|
||||
LOG.debug("Adding new devices:%s", device_ids)
|
||||
self.sync_devices = set(device_ids) | self.sync_devices
|
||||
if self.sync_devices:
|
||||
sync_devices_list = list(self.sync_devices)
|
||||
LOG.debug("Fetching routers on:%s", sync_devices_list)
|
||||
routers.extend(self._fetch_router_info(
|
||||
device_ids=sync_devices_list))
|
||||
self.sync_devices.clear()
|
||||
if removed_devices_info:
|
||||
if removed_devices_info.get('deconfigure'):
|
||||
ids = self._get_router_ids_from_removed_devices_info(
|
||||
removed_devices_info)
|
||||
self.removed_routers = self.removed_routers | set(ids)
|
||||
if self.removed_routers:
|
||||
removed_routers_ids = list(self.removed_routers)
|
||||
LOG.debug("Removed routers:%s", removed_routers_ids)
|
||||
for r in removed_routers_ids:
|
||||
if r in self.router_info:
|
||||
removed_routers.append(self.router_info[r].router)
|
||||
|
||||
# Sort on hosting device
|
||||
if routers:
|
||||
resources['routers'] = routers
|
||||
if removed_routers:
|
||||
resources['removed_routers'] = removed_routers
|
||||
hosting_devices = self._sort_resources_per_hosting_device(
|
||||
resources)
|
||||
|
||||
# Dispatch router processing for each hosting device
|
||||
pool = eventlet.GreenPool()
|
||||
for device_id, resources in hosting_devices.items():
|
||||
routers = resources.get('routers')
|
||||
removed_routers = resources.get('removed_routers')
|
||||
pool.spawn_n(self._process_routers, routers, removed_routers,
|
||||
device_id, all_routers=all_routers_flag)
|
||||
pool.waitall()
|
||||
if removed_devices_info:
|
||||
for hd_id in removed_devices_info['hosting_data']:
|
||||
self._drivermgr.remove_driver_for_hosting_device(hd_id)
|
||||
LOG.debug("Routing service processing successfully completed")
|
||||
except Exception:
|
||||
LOG.exception(_LE("Failed processing routers"))
|
||||
self.fullsync = True
|
||||
|
||||
def collect_state(self, configurations):
|
||||
"""Collect state from this helper.
|
||||
|
||||
A set of attributes which summarizes the state of the routers and
|
||||
configurations managed by this config agent.
|
||||
:param configurations: dict of configuration values
|
||||
:return dict of updated configuration values
|
||||
"""
|
||||
num_ex_gw_ports = 0
|
||||
num_interfaces = 0
|
||||
num_floating_ips = 0
|
||||
router_infos = self.router_info.values()
|
||||
num_routers = len(router_infos)
|
||||
num_hd_routers = collections.defaultdict(int)
|
||||
for ri in router_infos:
|
||||
ex_gw_port = ri.router.get('gw_port')
|
||||
if ex_gw_port:
|
||||
num_ex_gw_ports += 1
|
||||
num_interfaces += len(ri.router.get(
|
||||
l3_constants.INTERFACE_KEY, []))
|
||||
num_floating_ips += len(ri.router.get(
|
||||
l3_constants.FLOATINGIP_KEY, []))
|
||||
hd = ri.router['hosting_device']
|
||||
if hd:
|
||||
num_hd_routers[hd['id']] += 1
|
||||
routers_per_hd = dict((hd_id, {'routers': num})
|
||||
for hd_id, num in num_hd_routers.items())
|
||||
non_responding = self._dev_status.get_backlogged_hosting_devices()
|
||||
configurations['total routers'] = num_routers
|
||||
configurations['total ex_gw_ports'] = num_ex_gw_ports
|
||||
configurations['total interfaces'] = num_interfaces
|
||||
configurations['total floating_ips'] = num_floating_ips
|
||||
configurations['hosting_devices'] = routers_per_hd
|
||||
configurations['non_responding_hosting_devices'] = non_responding
|
||||
return configurations
|
||||
|
||||
# Routing service helper internal methods
|
||||
|
||||
def _fetch_router_info(self, router_ids=None, device_ids=None,
|
||||
all_routers=False):
|
||||
"""Fetch router dict from the routing plugin.
|
||||
|
||||
:param router_ids: List of router_ids of routers to fetch
|
||||
:param device_ids: List of device_ids whose routers to fetch
|
||||
:param all_routers: If True fetch all the routers for this agent.
|
||||
:return: List of router dicts of format:
|
||||
[ {router_dict1}, {router_dict2},.....]
|
||||
"""
|
||||
try:
|
||||
if all_routers:
|
||||
return self.plugin_rpc.get_routers(self.context)
|
||||
if router_ids:
|
||||
return self.plugin_rpc.get_routers(self.context,
|
||||
router_ids=router_ids)
|
||||
if device_ids:
|
||||
return self.plugin_rpc.get_routers(self.context,
|
||||
hd_ids=device_ids)
|
||||
except oslo_messaging.MessagingException:
|
||||
LOG.exception(_LE("RPC Error in fetching routers from plugin"))
|
||||
self.fullsync = True
|
||||
|
||||
@staticmethod
|
||||
def _get_router_ids_from_removed_devices_info(removed_devices_info):
|
||||
"""Extract router_ids from the removed devices info dict.
|
||||
|
||||
:param removed_devices_info: Dict of removed devices and their
|
||||
associated resources.
|
||||
Format:
|
||||
{
|
||||
'hosting_data': {'hd_id1': {'routers': [id1, id2, ...]},
|
||||
'hd_id2': {'routers': [id3, id4, ...]},
|
||||
...
|
||||
},
|
||||
'deconfigure': True/False
|
||||
}
|
||||
:return removed_router_ids: List of removed router ids
|
||||
"""
|
||||
removed_router_ids = []
|
||||
for hd_id, resources in removed_devices_info['hosting_data'].items():
|
||||
removed_router_ids += resources.get('routers', [])
|
||||
return removed_router_ids
|
||||
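# --- Illustrative sketch (editor's addition, not part of the removed file) ---
# Worked example for the removed_devices_info format documented above; written
# as an extra staticmethod so it remains valid inside this class body. All ids
# are made up.
@staticmethod
def _example_removed_router_ids():
    removed_devices_info = {
        'hosting_data': {'hd-1': {'routers': ['r1', 'r2']},
                         'hd-2': {'routers': ['r3']}},
        'deconfigure': True,
    }
    # returns ['r1', 'r2', 'r3'] (order follows dict iteration)
    return RoutingServiceHelper._get_router_ids_from_removed_devices_info(
        removed_devices_info)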
|
||||
@staticmethod
|
||||
def _sort_resources_per_hosting_device(resources):
|
||||
"""This function sorts the resources per hosting device.
|
||||
|
||||
The sorting on hosting device is done by looking up the
|
||||
`hosting_device` attribute of the resource, and its `id`.
|
||||
|
||||
:param resources: a dict keyed by resource name
|
||||
:return dict sorted on the hosting device of input resource. Format:
|
||||
hosting_devices = {
|
||||
'hd_id1' : {'routers':[routers],
|
||||
'removed_routers':[routers], .... }
|
||||
'hd_id2' : {'routers':[routers], .. }
|
||||
.......
|
||||
}
|
||||
"""
|
||||
hosting_devices = {}
|
||||
for key in resources.keys():
|
||||
for r in resources.get(key) or []:
|
||||
hd_id = r['hosting_device']['id']
|
||||
hosting_devices.setdefault(hd_id, {})
|
||||
hosting_devices[hd_id].setdefault(key, []).append(r)
|
||||
return hosting_devices
|
||||
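# --- Illustrative sketch (editor's addition, not part of the removed file) ---
# Worked example of the sorting above with made-up router dicts; written as an
# extra staticmethod so it remains valid inside this class body.
@staticmethod
def _example_sort_resources():
    r1 = {'id': 'r1', 'hosting_device': {'id': 'hd-1'}}
    r2 = {'id': 'r2', 'hosting_device': {'id': 'hd-2'}}
    r3 = {'id': 'r3', 'hosting_device': {'id': 'hd-1'}}
    resources = {'routers': [r1, r2], 'removed_routers': [r3]}
    # expected result:
    # {'hd-1': {'routers': [r1], 'removed_routers': [r3]},
    #  'hd-2': {'routers': [r2]}}
    return RoutingServiceHelper._sort_resources_per_hosting_device(resources)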
|
||||
def _process_routers(self, routers, removed_routers,
|
||||
device_id=None, all_routers=False):
|
||||
"""Process the set of routers.
|
||||
|
||||
The set of routers received is compared with the set of routers already
|
||||
known to the routing service helper to identify newly added routers.
|
||||
Before processing, the reachability (via ping) of the hosting device
|
||||
where each router is hosted is checked. If the device is not
|
||||
reachable, it is backlogged.
|
||||
|
||||
For routers which are only updated, call `_process_router()` on them.
|
||||
|
||||
When all_routers is set to True (because of a full sync),
|
||||
this will result in the detection and deletion of routers which
|
||||
have been removed.
|
||||
|
||||
Whether the router can only be assigned to a particular hosting device
|
||||
is decided and enforced by the plugin. No checks are done here.
|
||||
|
||||
:param routers: The set of routers to be processed
|
||||
:param removed_routers: the set of routers which were removed
|
||||
:param device_id: Id of the hosting device
|
||||
:param all_routers: Flag indicating that `routers` is the full set of routers for this agent
|
||||
:return: None
|
||||
"""
|
||||
try:
|
||||
if all_routers:
|
||||
prev_router_ids = set(self.router_info)
|
||||
else:
|
||||
prev_router_ids = set(self.router_info) & set(
|
||||
[router['id'] for router in routers])
|
||||
cur_router_ids = set()
|
||||
for r in routers:
|
||||
try:
|
||||
if not r['admin_state_up']:
|
||||
continue
|
||||
cur_router_ids.add(r['id'])
|
||||
hd = r['hosting_device']
|
||||
if not self._dev_status.is_hosting_device_reachable(hd):
|
||||
LOG.info(_LI("Router: %(id)s is on an unreachable "
|
||||
"hosting device. "), {'id': r['id']})
|
||||
continue
|
||||
if r['id'] not in self.router_info:
|
||||
self._router_added(r['id'], r)
|
||||
ri = self.router_info[r['id']]
|
||||
ri.router = r
|
||||
self._process_router(ri)
|
||||
except KeyError as e:
|
||||
LOG.exception(_LE("Key Error, missing key: %s"), e)
|
||||
self.updated_routers.add(r['id'])
|
||||
continue
|
||||
except cfg_exceptions.DriverException as e:
|
||||
LOG.exception(_LE("Driver Exception on router:%(id)s. "
|
||||
"Error is %(e)s"), {'id': r['id'], 'e': e})
|
||||
self.updated_routers.update(r['id'])
|
||||
continue
|
||||
# identify and remove routers that no longer exist
|
||||
for router_id in prev_router_ids - cur_router_ids:
|
||||
self._router_removed(router_id)
|
||||
if removed_routers:
|
||||
for router in removed_routers:
|
||||
self._router_removed(router['id'])
|
||||
except Exception:
|
||||
LOG.exception(_LE("Exception in processing routers on device:%s"),
|
||||
device_id)
|
||||
self.sync_devices.add(device_id)
|
||||
|
||||
def _process_router(self, ri):
|
||||
"""Process a router, apply latest configuration and update router_info.
|
||||
|
||||
Get the router dict from RouterInfo and proceed to detect changes
|
||||
from the last known state. When new ports or deleted ports are
|
||||
detected, `internal_network_added()` or `internal_network_removed()`
|
||||
are called accordingly. Similarly changes in ex_gw_port causes
|
||||
`external_gateway_added()` or `external_gateway_removed()` calls.
|
||||
Next, floating_ips and routes are processed. Also, latest state is
|
||||
stored in ri.internal_ports and ri.ex_gw_port for future comparisons.
|
||||
|
||||
:param ri : RouterInfo object of the router being processed.
|
||||
:return:None
|
||||
:raises: neutron.plugins.cisco.cfg_agent.cfg_exceptions.DriverException
|
||||
if the configuration operation fails.
|
||||
"""
|
||||
try:
|
||||
ex_gw_port = ri.router.get('gw_port')
|
||||
ri.ha_info = ri.router.get('ha_info', None)
|
||||
internal_ports = ri.router.get(l3_constants.INTERFACE_KEY, [])
|
||||
existing_port_ids = set([p['id'] for p in ri.internal_ports])
|
||||
current_port_ids = set([p['id'] for p in internal_ports
|
||||
if p['admin_state_up']])
|
||||
new_ports = [p for p in internal_ports
|
||||
if
|
||||
p['id'] in (current_port_ids - existing_port_ids)]
|
||||
old_ports = [p for p in ri.internal_ports
|
||||
if p['id'] not in current_port_ids]
|
||||
|
||||
for p in new_ports:
|
||||
self._set_subnet_info(p)
|
||||
self._internal_network_added(ri, p, ex_gw_port)
|
||||
ri.internal_ports.append(p)
|
||||
|
||||
for p in old_ports:
|
||||
self._internal_network_removed(ri, p, ri.ex_gw_port)
|
||||
ri.internal_ports.remove(p)
|
||||
|
||||
if ex_gw_port and not ri.ex_gw_port:
|
||||
self._set_subnet_info(ex_gw_port)
|
||||
self._external_gateway_added(ri, ex_gw_port)
|
||||
elif not ex_gw_port and ri.ex_gw_port:
|
||||
self._external_gateway_removed(ri, ri.ex_gw_port)
|
||||
|
||||
if ex_gw_port:
|
||||
self._process_router_floating_ips(ri, ex_gw_port)
|
||||
|
||||
ri.ex_gw_port = ex_gw_port
|
||||
self._routes_updated(ri)
|
||||
except cfg_exceptions.DriverException as e:
|
||||
with excutils.save_and_reraise_exception():
|
||||
self.updated_routers.update(ri.router_id)
|
||||
LOG.error(e)
|
||||
|
||||
def _process_router_floating_ips(self, ri, ex_gw_port):
|
||||
"""Process a router's floating ips.
|
||||
|
||||
Compare the current floating IPs (in ri.floating_ips) with the router's
|
||||
updated floating IPs (in ri.router.floating_ips) and detect
|
||||
floating IPs which were added or removed. Notify the driver of
|
||||
the change via `floating_ip_added()` or `floating_ip_removed()`.
|
||||
|
||||
:param ri: RouterInfo object of the router being processed.
|
||||
:param ex_gw_port: Port dict of the external gateway port.
|
||||
:return: None
|
||||
:raises: neutron.plugins.cisco.cfg_agent.cfg_exceptions.DriverException
|
||||
if the configuration operation fails.
|
||||
"""
|
||||
|
||||
floating_ips = ri.router.get(l3_constants.FLOATINGIP_KEY, [])
|
||||
existing_floating_ip_ids = set(
|
||||
[fip['id'] for fip in ri.floating_ips])
|
||||
cur_floating_ip_ids = set([fip['id'] for fip in floating_ips])
|
||||
|
||||
id_to_fip_map = {}
|
||||
|
||||
for fip in floating_ips:
|
||||
if fip['port_id']:
|
||||
# store to see if floatingip was remapped
|
||||
id_to_fip_map[fip['id']] = fip
|
||||
if fip['id'] not in existing_floating_ip_ids:
|
||||
ri.floating_ips.append(fip)
|
||||
self._floating_ip_added(ri, ex_gw_port,
|
||||
fip['floating_ip_address'],
|
||||
fip['fixed_ip_address'])
|
||||
|
||||
floating_ip_ids_to_remove = (existing_floating_ip_ids -
|
||||
cur_floating_ip_ids)
|
||||
for fip in ri.floating_ips:
|
||||
if fip['id'] in floating_ip_ids_to_remove:
|
||||
ri.floating_ips.remove(fip)
|
||||
self._floating_ip_removed(ri, ri.ex_gw_port,
|
||||
fip['floating_ip_address'],
|
||||
fip['fixed_ip_address'])
|
||||
else:
|
||||
# handle remapping of a floating IP
|
||||
new_fip = id_to_fip_map[fip['id']]
|
||||
new_fixed_ip = new_fip['fixed_ip_address']
|
||||
existing_fixed_ip = fip['fixed_ip_address']
|
||||
if (new_fixed_ip and existing_fixed_ip and
|
||||
new_fixed_ip != existing_fixed_ip):
|
||||
floating_ip = fip['floating_ip_address']
|
||||
self._floating_ip_removed(ri, ri.ex_gw_port,
|
||||
floating_ip,
|
||||
existing_fixed_ip)
|
||||
self._floating_ip_added(ri, ri.ex_gw_port,
|
||||
floating_ip, new_fixed_ip)
|
||||
ri.floating_ips.remove(fip)
|
||||
ri.floating_ips.append(new_fip)
|
||||
|
||||
def _router_added(self, router_id, router):
|
||||
"""Operations when a router is added.
|
||||
|
||||
Create a new RouterInfo object for this router and add it to the
|
||||
service helper's router_info dictionary. Then `router_added()` is
|
||||
called on the device driver.
|
||||
|
||||
:param router_id: id of the router
|
||||
:param router: router dict
|
||||
:return: None
|
||||
"""
|
||||
ri = RouterInfo(router_id, router)
|
||||
driver = self._drivermgr.set_driver(router)
|
||||
driver.router_added(ri)
|
||||
self.router_info[router_id] = ri
|
||||
|
||||
def _router_removed(self, router_id, deconfigure=True):
|
||||
"""Operations when a router is removed.
|
||||
|
||||
Get the RouterInfo object corresponding to the router in the service
|
||||
helper's router_info dict. If deconfigure is set to True,
|
||||
remove this router's configuration from the hosting device.
|
||||
:param router_id: id of the router
|
||||
:param deconfigure: if True, the router's configuration is deleted from
|
||||
the hosting device.
|
||||
:return: None
|
||||
"""
|
||||
ri = self.router_info.get(router_id)
|
||||
if ri is None:
|
||||
LOG.warning(_LW("Info for router %s was not found. "
|
||||
"Skipping router removal"), router_id)
|
||||
return
|
||||
ri.router['gw_port'] = None
|
||||
ri.router[l3_constants.INTERFACE_KEY] = []
|
||||
ri.router[l3_constants.FLOATINGIP_KEY] = []
|
||||
try:
|
||||
if deconfigure:
|
||||
self._process_router(ri)
|
||||
driver = self._drivermgr.get_driver(router_id)
|
||||
driver.router_removed(ri, deconfigure)
|
||||
self._drivermgr.remove_driver(router_id)
|
||||
del self.router_info[router_id]
|
||||
self.removed_routers.discard(router_id)
|
||||
except cfg_exceptions.DriverException:
|
||||
LOG.warning(_LW("Router remove for router_id: %s was incomplete. "
|
||||
"Adding the router to removed_routers list"), router_id)
|
||||
self.removed_routers.add(router_id)
|
||||
# remove this router from updated_routers if it is there. It might
|
||||
# end up there too if an exception was thrown earlier inside
|
||||
# `_process_router()`
|
||||
self.updated_routers.discard(router_id)
|
||||
|
||||
def _internal_network_added(self, ri, port, ex_gw_port):
|
||||
driver = self._drivermgr.get_driver(ri.id)
|
||||
driver.internal_network_added(ri, port)
|
||||
if ri.snat_enabled and ex_gw_port:
|
||||
driver.enable_internal_network_NAT(ri, port, ex_gw_port)
|
||||
|
||||
def _internal_network_removed(self, ri, port, ex_gw_port):
|
||||
driver = self._drivermgr.get_driver(ri.id)
|
||||
driver.internal_network_removed(ri, port)
|
||||
if ri.snat_enabled and ex_gw_port:
|
||||
driver.disable_internal_network_NAT(ri, port, ex_gw_port)
|
||||
|
||||
def _external_gateway_added(self, ri, ex_gw_port):
|
||||
driver = self._drivermgr.get_driver(ri.id)
|
||||
driver.external_gateway_added(ri, ex_gw_port)
|
||||
if ri.snat_enabled and ri.internal_ports:
|
||||
for port in ri.internal_ports:
|
||||
driver.enable_internal_network_NAT(ri, port, ex_gw_port)
|
||||
|
||||
def _external_gateway_removed(self, ri, ex_gw_port):
|
||||
driver = self._drivermgr.get_driver(ri.id)
|
||||
if ri.snat_enabled and ri.internal_ports:
|
||||
for port in ri.internal_ports:
|
||||
driver.disable_internal_network_NAT(ri, port, ex_gw_port)
|
||||
driver.external_gateway_removed(ri, ex_gw_port)
|
||||
|
||||
def _floating_ip_added(self, ri, ex_gw_port, floating_ip, fixed_ip):
|
||||
driver = self._drivermgr.get_driver(ri.id)
|
||||
driver.floating_ip_added(ri, ex_gw_port, floating_ip, fixed_ip)
|
||||
|
||||
def _floating_ip_removed(self, ri, ex_gw_port, floating_ip, fixed_ip):
|
||||
driver = self._drivermgr.get_driver(ri.id)
|
||||
driver.floating_ip_removed(ri, ex_gw_port, floating_ip, fixed_ip)
|
||||
|
||||
def _routes_updated(self, ri):
|
||||
"""Update the state of routes in the router.
|
||||
|
||||
Compares the current routes with the (configured) existing routes
|
||||
and detect what was removed or added. Then configure the
|
||||
logical router in the hosting device accordingly.
|
||||
:param ri: RouterInfo corresponding to the router.
|
||||
:return: None
|
||||
:raises: neutron.plugins.cisco.cfg_agent.cfg_exceptions.DriverException
|
||||
if the configuration operation fails.
|
||||
"""
|
||||
new_routes = ri.router['routes']
|
||||
old_routes = ri.routes
|
||||
adds, removes = common_utils.diff_list_of_dict(old_routes,
|
||||
new_routes)
|
||||
for route in adds:
|
||||
LOG.debug("Added route entry is '%s'", route)
|
||||
# a route with the same destination was replaced; drop it from removes
|
||||
for del_route in removes:
|
||||
if route['destination'] == del_route['destination']:
|
||||
removes.remove(del_route)
|
||||
driver = self._drivermgr.get_driver(ri.id)
|
||||
driver.routes_updated(ri, 'replace', route)
|
||||
|
||||
for route in removes:
|
||||
LOG.debug("Removed route entry is '%s'", route)
|
||||
driver = self._drivermgr.get_driver(ri.id)
|
||||
driver.routes_updated(ri, 'delete', route)
|
||||
ri.routes = new_routes
|
||||
|
||||
@staticmethod
|
||||
def _set_subnet_info(port):
|
||||
ips = port['fixed_ips']
|
||||
if not ips:
|
||||
raise Exception(_("Router port %s has no IP address") % port['id'])
|
||||
if len(ips) > 1:
|
||||
LOG.error(_LE("Ignoring multiple IPs on router port %s"),
|
||||
port['id'])
|
||||
prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen
|
||||
port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen)
|
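# --- Illustrative sketch (editor's addition, not part of the removed file) ---
# What _set_subnet_info() computes, shown on made-up data: the port's first
# fixed IP is combined with the subnet prefix length into 'ip_cidr'.
import netaddr

def _example_set_subnet_info():
    port = {'id': 'p1',
            'fixed_ips': [{'ip_address': '10.0.0.4'}],
            'subnet': {'cidr': '10.0.0.0/24'}}
    prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen
    port['ip_cidr'] = "%s/%s" % (port['fixed_ips'][0]['ip_address'], prefixlen)
    return port['ip_cidr']  # '10.0.0.4/24'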
@ -1,489 +0,0 @@
|
||||
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import random
|
||||
|
||||
from keystoneclient import exceptions as k_exceptions
|
||||
from keystoneclient.v2_0 import client as k_client
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import importutils
|
||||
from oslo_utils import timeutils
|
||||
from sqlalchemy.orm import exc
|
||||
from sqlalchemy.orm import joinedload
|
||||
|
||||
from neutron.common import exceptions as n_exc
|
||||
from neutron.common import utils
|
||||
from neutron import context as neutron_context
|
||||
from neutron.db import agents_db
|
||||
from neutron.i18n import _LE, _LI, _LW
|
||||
from neutron import manager
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.openstack.common import uuidutils
|
||||
from neutron.plugins.cisco.common import cisco_constants as c_constants
|
||||
from neutron.plugins.cisco.db.l3 import l3_models
|
||||
from neutron.plugins.cisco.l3 import service_vm_lib
|
||||
from neutron.plugins.common import constants as svc_constants
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
DEVICE_HANDLING_OPTS = [
|
||||
cfg.StrOpt('l3_admin_tenant', default='L3AdminTenant',
|
||||
help=_('Name of the L3 admin tenant.')),
|
||||
cfg.StrOpt('management_network', default='osn_mgmt_nw',
|
||||
help=_('Name of management network for device configuration. '
|
||||
'Default value is osn_mgmt_nw')),
|
||||
cfg.StrOpt('default_security_group', default='mgmt_sec_grp',
|
||||
help=_('Default security group applied on management port. '
|
||||
'Default value is mgmt_sec_grp.')),
|
||||
cfg.IntOpt('cfg_agent_down_time', default=60,
|
||||
help=_('Seconds of no status update until a cfg agent '
|
||||
'is considered down.')),
|
||||
cfg.BoolOpt('ensure_nova_running', default=True,
|
||||
help=_('Ensure that Nova is running before attempting to '
|
||||
'create any VM.'))
|
||||
]
|
||||
|
||||
CSR1KV_OPTS = [
|
||||
cfg.StrOpt('csr1kv_image', default='csr1kv_openstack_img',
|
||||
help=_('Name of Glance image for CSR1kv.')),
|
||||
cfg.StrOpt('csr1kv_flavor', default=621,
|
||||
help=_('UUID of Nova flavor for CSR1kv.')),
|
||||
cfg.StrOpt('csr1kv_plugging_driver',
|
||||
default=('neutron.plugins.cisco.l3.plugging_drivers.'
|
||||
'n1kv_trunking_driver.N1kvTrunkingPlugDriver'),
|
||||
help=_('Plugging driver for CSR1kv.')),
|
||||
cfg.StrOpt('csr1kv_device_driver',
|
||||
default=('neutron.plugins.cisco.l3.hosting_device_drivers.'
|
||||
'csr1kv_hd_driver.CSR1kvHostingDeviceDriver'),
|
||||
help=_('Hosting device driver for CSR1kv.')),
|
||||
cfg.StrOpt('csr1kv_cfgagent_router_driver',
|
||||
default=('neutron.plugins.cisco.cfg_agent.device_drivers.'
|
||||
'csr1kv.csr1kv_routing_driver.CSR1kvRoutingDriver'),
|
||||
help=_('Config agent driver for CSR1kv.')),
|
||||
cfg.IntOpt('csr1kv_booting_time', default=420,
|
||||
help=_('Booting time in seconds before a CSR1kv '
|
||||
'becomes operational.')),
|
||||
cfg.StrOpt('csr1kv_username', default='stack',
|
||||
help=_('Username to use for CSR1kv configurations.')),
|
||||
cfg.StrOpt('csr1kv_password', default='cisco', secret=True,
|
||||
help=_('Password to use for CSR1kv configurations.'))
|
||||
]
|
||||
|
||||
cfg.CONF.register_opts(DEVICE_HANDLING_OPTS, "general")
|
||||
cfg.CONF.register_opts(CSR1KV_OPTS, "hosting_devices")
|
||||
|
||||
|
||||
class DeviceHandlingMixin(object):
|
||||
"""A class implementing some functionality to handle devices."""
|
||||
|
||||
# The all-mighty tenant owning all hosting devices
|
||||
_l3_tenant_uuid = None
|
||||
# The management network for hosting devices
|
||||
_mgmt_nw_uuid = None
|
||||
_mgmt_sec_grp_id = None
|
||||
|
||||
# Loaded driver modules for CSR1kv
|
||||
_hosting_device_driver = None
|
||||
_plugging_driver = None
|
||||
|
||||
# Service VM manager object that interacts with Nova
|
||||
_svc_vm_mgr = None
|
||||
|
||||
# Flag indicating whether the needed Nova services are reported as up.
|
||||
_nova_running = False
|
||||
|
||||
@classmethod
|
||||
def l3_tenant_id(cls):
|
||||
"""Returns id of tenant owning hosting device resources."""
|
||||
if cls._l3_tenant_uuid is None:
|
||||
auth_url = cfg.CONF.keystone_authtoken.auth_uri
|
||||
user = cfg.CONF.keystone_authtoken.admin_user
|
||||
pw = cfg.CONF.keystone_authtoken.admin_password
|
||||
tenant = cfg.CONF.keystone_authtoken.admin_tenant_name
|
||||
keystone = k_client.Client(username=user, password=pw,
|
||||
tenant_name=tenant,
|
||||
auth_url=auth_url)
|
||||
try:
|
||||
tenant = keystone.tenants.find(
|
||||
name=cfg.CONF.general.l3_admin_tenant)
|
||||
cls._l3_tenant_uuid = tenant.id
|
||||
except k_exceptions.NotFound:
|
||||
LOG.error(_LE('No tenant with a name or ID of %s exists.'),
|
||||
cfg.CONF.general.l3_admin_tenant)
|
||||
except k_exceptions.NoUniqueMatch:
|
||||
LOG.error(_LE('Multiple tenant matches found for %s'),
|
||||
cfg.CONF.general.l3_admin_tenant)
|
||||
return cls._l3_tenant_uuid
|
||||
|
||||
@classmethod
|
||||
def mgmt_nw_id(cls):
|
||||
"""Returns id of the management network."""
|
||||
if cls._mgmt_nw_uuid is None:
|
||||
tenant_id = cls.l3_tenant_id()
|
||||
if not tenant_id:
|
||||
return
|
||||
net = manager.NeutronManager.get_plugin().get_networks(
|
||||
neutron_context.get_admin_context(),
|
||||
{'tenant_id': [tenant_id],
|
||||
'name': [cfg.CONF.general.management_network]},
|
||||
['id', 'subnets'])
|
||||
if len(net) == 1:
|
||||
num_subnets = len(net[0]['subnets'])
|
||||
if num_subnets == 0:
|
||||
LOG.error(_LE('The virtual management network has no '
|
||||
'subnet. Please assign one.'))
|
||||
return
|
||||
elif num_subnets > 1:
|
||||
LOG.info(_LI('The virtual management network has %d '
|
||||
'subnets. The first one will be used.'),
|
||||
num_subnets)
|
||||
cls._mgmt_nw_uuid = net[0].get('id')
|
||||
elif len(net) > 1:
|
||||
# Management network must have a unique name.
|
||||
LOG.error(_LE('The virtual management network does not have a '
|
||||
'unique name. Please ensure that it does.'))
|
||||
else:
|
||||
# Management network has not been created.
|
||||
LOG.error(_LE('There is no virtual management network. Please '
|
||||
'create one.'))
|
||||
return cls._mgmt_nw_uuid
|
||||
|
||||
@classmethod
|
||||
def mgmt_sec_grp_id(cls):
|
||||
"""Returns id of security group used by the management network."""
|
||||
if not utils.is_extension_supported(
|
||||
manager.NeutronManager.get_plugin(), "security-group"):
|
||||
return
|
||||
if cls._mgmt_sec_grp_id is None:
|
||||
# Get the id for the _mgmt_security_group_id
|
||||
tenant_id = cls.l3_tenant_id()
|
||||
res = manager.NeutronManager.get_plugin().get_security_groups(
|
||||
neutron_context.get_admin_context(),
|
||||
{'tenant_id': [tenant_id],
|
||||
'name': [cfg.CONF.general.default_security_group]},
|
||||
['id'])
|
||||
if len(res) == 1:
|
||||
cls._mgmt_sec_grp_id = res[0].get('id')
|
||||
elif len(res) > 1:
|
||||
# the mgmt sec group must be unique.
|
||||
LOG.error(_LE('The security group for the virtual management '
|
||||
'network does not have a unique name. Please ensure '
|
||||
'that it does.'))
|
||||
else:
|
||||
# CSR Mgmt security group is not present.
|
||||
LOG.error(_LE('There is no security group for the virtual '
|
||||
'management network. Please create one.'))
|
||||
return cls._mgmt_sec_grp_id
|
||||
|
||||
@classmethod
|
||||
def get_hosting_device_driver(cls):
|
||||
"""Returns device driver."""
|
||||
if cls._hosting_device_driver:
|
||||
return cls._hosting_device_driver
|
||||
else:
|
||||
try:
|
||||
cls._hosting_device_driver = importutils.import_object(
|
||||
cfg.CONF.hosting_devices.csr1kv_device_driver)
|
||||
except (ImportError, TypeError, n_exc.NeutronException):
|
||||
LOG.exception(_LE('Error loading hosting device driver'))
|
||||
return cls._hosting_device_driver
|
||||
|
||||
@classmethod
|
||||
def get_hosting_device_plugging_driver(cls):
|
||||
"""Returns plugging driver."""
|
||||
if cls._plugging_driver:
|
||||
return cls._plugging_driver
|
||||
else:
|
||||
try:
|
||||
cls._plugging_driver = importutils.import_object(
|
||||
cfg.CONF.hosting_devices.csr1kv_plugging_driver)
|
||||
except (ImportError, TypeError, n_exc.NeutronException):
|
||||
LOG.exception(_LE('Error loading plugging driver'))
|
||||
return cls._plugging_driver
|
||||
|
||||
def get_hosting_devices_qry(self, context, hosting_device_ids,
|
||||
load_agent=True):
|
||||
"""Returns hosting devices with <hosting_device_ids>."""
|
||||
query = context.session.query(l3_models.HostingDevice)
|
||||
if load_agent:
|
||||
query = query.options(joinedload('cfg_agent'))
|
||||
if len(hosting_device_ids) > 1:
|
||||
query = query.filter(l3_models.HostingDevice.id.in_(
|
||||
hosting_device_ids))
|
||||
else:
|
||||
query = query.filter(l3_models.HostingDevice.id ==
|
||||
hosting_device_ids[0])
|
||||
return query
|
||||
|
||||
def handle_non_responding_hosting_devices(self, context, host,
|
||||
hosting_device_ids):
|
||||
with context.session.begin(subtransactions=True):
|
||||
e_context = context.elevated()
|
||||
hosting_devices = self.get_hosting_devices_qry(
|
||||
e_context, hosting_device_ids).all()
|
||||
# 'hosting_info' is dictionary with ids of removed hosting
|
||||
# devices and the affected logical resources for each
|
||||
# removed hosting device:
|
||||
# {'hd_id1': {'routers': [id1, id2, ...],
|
||||
# 'fw': [id1, ...],
|
||||
# ...},
|
||||
# 'hd_id2': {'routers': [id3, id4, ...]},
|
||||
# 'fw': [id1, ...],
|
||||
# ...},
|
||||
# ...}
|
||||
hosting_info = dict((id, {}) for id in hosting_device_ids)
|
||||
try:
|
||||
#TODO(bobmel): Modify so service plugins register themselves
|
||||
self._handle_non_responding_hosting_devices(
|
||||
context, hosting_devices, hosting_info)
|
||||
except AttributeError:
|
||||
pass
|
||||
for hd in hosting_devices:
|
||||
if not self._process_non_responsive_hosting_device(e_context,
|
||||
hd):
|
||||
# exclude this device since we did not remove it
|
||||
del hosting_info[hd['id']]
|
||||
self.l3_cfg_rpc_notifier.hosting_devices_removed(
|
||||
context, hosting_info, False, host)
|
||||
|
||||
def get_device_info_for_agent(self, hosting_device):
|
||||
"""Returns information about <hosting_device> needed by config agent.
|
||||
|
||||
Convenience function that service plugins can use to populate
|
||||
their resources with information about the device hosting their
|
||||
logical resource.
|
||||
"""
|
||||
credentials = {'username': cfg.CONF.hosting_devices.csr1kv_username,
|
||||
'password': cfg.CONF.hosting_devices.csr1kv_password}
|
||||
mgmt_ip = (hosting_device.management_port['fixed_ips'][0]['ip_address']
|
||||
if hosting_device.management_port else None)
|
||||
return {'id': hosting_device.id,
|
||||
'credentials': credentials,
|
||||
'management_ip_address': mgmt_ip,
|
||||
'protocol_port': hosting_device.protocol_port,
|
||||
'created_at': str(hosting_device.created_at),
|
||||
'booting_time': cfg.CONF.hosting_devices.csr1kv_booting_time,
|
||||
'cfg_agent_id': hosting_device.cfg_agent_id}
|
||||
|
||||
@classmethod
|
||||
def is_agent_down(cls, heart_beat_time,
|
||||
timeout=cfg.CONF.general.cfg_agent_down_time):
|
||||
return timeutils.is_older_than(heart_beat_time, timeout)
|
||||
|
||||
def get_cfg_agents_for_hosting_devices(self, context, hosting_device_ids,
|
||||
admin_state_up=None, active=None,
|
||||
schedule=False):
|
||||
if not hosting_device_ids:
|
||||
return []
|
||||
query = self.get_hosting_devices_qry(context, hosting_device_ids)
|
||||
if admin_state_up is not None:
|
||||
query = query.filter(
|
||||
agents_db.Agent.admin_state_up == admin_state_up)
|
||||
if schedule:
|
||||
agents = []
|
||||
for hosting_device in query:
|
||||
if hosting_device.cfg_agent is None:
|
||||
agent = self._select_cfgagent(context, hosting_device)
|
||||
if agent is not None:
|
||||
agents.append(agent)
|
||||
else:
|
||||
agents.append(hosting_device.cfg_agent)
|
||||
else:
|
||||
agents = [hosting_device.cfg_agent for hosting_device in query
|
||||
if hosting_device.cfg_agent is not None]
|
||||
if active is not None:
|
||||
agents = [a for a in agents if not
|
||||
self.is_agent_down(a['heartbeat_timestamp'])]
|
||||
return agents
|
||||
|
||||
def auto_schedule_hosting_devices(self, context, agent_host):
|
||||
"""Schedules unassociated hosting devices to Cisco cfg agent.
|
||||
|
||||
Schedules hosting devices to agent running on <agent_host>.
|
||||
"""
|
||||
with context.session.begin(subtransactions=True):
|
||||
# Check if there is a valid Cisco cfg agent on the host
|
||||
query = context.session.query(agents_db.Agent)
|
||||
query = query.filter_by(agent_type=c_constants.AGENT_TYPE_CFG,
|
||||
host=agent_host, admin_state_up=True)
|
||||
try:
|
||||
cfg_agent = query.one()
|
||||
except (exc.MultipleResultsFound, exc.NoResultFound):
|
||||
LOG.debug('No enabled Cisco cfg agent on host %s',
|
||||
agent_host)
|
||||
return False
|
||||
if self.is_agent_down(
|
||||
cfg_agent.heartbeat_timestamp):
|
||||
LOG.warning(_LW('Cisco cfg agent %s is not alive'),
|
||||
cfg_agent.id)
|
||||
query = context.session.query(l3_models.HostingDevice)
|
||||
query = query.filter_by(cfg_agent_id=None)
|
||||
for hd in query:
|
||||
hd.cfg_agent = cfg_agent
|
||||
context.session.add(hd)
|
||||
return True
|
||||
|
||||
def _setup_device_handling(self):
|
||||
auth_url = cfg.CONF.keystone_authtoken.auth_uri
|
||||
u_name = cfg.CONF.keystone_authtoken.admin_user
|
||||
pw = cfg.CONF.keystone_authtoken.admin_password
|
||||
tenant = cfg.CONF.general.l3_admin_tenant
|
||||
self._svc_vm_mgr = service_vm_lib.ServiceVMManager(
|
||||
user=u_name, passwd=pw, l3_admin_tenant=tenant, auth_url=auth_url)
|
||||
|
||||
def _process_non_responsive_hosting_device(self, context, hosting_device):
|
||||
"""Host type specific processing of non responsive hosting devices.
|
||||
|
||||
:param hosting_device: db object for hosting device
|
||||
:return: True if hosting_device has been deleted, otherwise False
|
||||
"""
|
||||
|
||||
self._delete_service_vm_hosting_device(context, hosting_device)
|
||||
return True
|
||||
|
||||
def _create_csr1kv_vm_hosting_device(self, context):
|
||||
"""Creates a CSR1kv VM instance."""
|
||||
# Note(bobmel): Nova does not handle VM dispatching well before all
|
||||
# its services have started. This creates problems for the Neutron
|
||||
# devstack script that creates a Neutron router, which in turn
|
||||
# triggers service VM dispatching.
|
||||
# Only perform pool maintenance if needed Nova services have started
|
||||
if (cfg.CONF.general.ensure_nova_running and not self._nova_running):
|
||||
if self._svc_vm_mgr.nova_services_up():
|
||||
self.__class__._nova_running = True
|
||||
else:
|
||||
LOG.info(_LI('Not all Nova services are up and running. '
|
||||
'Skipping this CSR1kv vm create request.'))
|
||||
return
|
||||
plugging_drv = self.get_hosting_device_plugging_driver()
|
||||
hosting_device_drv = self.get_hosting_device_driver()
|
||||
if plugging_drv is None or hosting_device_drv is None:
|
||||
return
|
||||
# These resources are owned by the L3AdminTenant
|
||||
complementary_id = uuidutils.generate_uuid()
|
||||
dev_data = {'complementary_id': complementary_id,
|
||||
'device_id': 'CSR1kv',
|
||||
'admin_state_up': True,
|
||||
'protocol_port': 22,
|
||||
'created_at': timeutils.utcnow()}
|
||||
res = plugging_drv.create_hosting_device_resources(
|
||||
context, complementary_id, self.l3_tenant_id(),
|
||||
self.mgmt_nw_id(), self.mgmt_sec_grp_id(), 1)
|
||||
if res.get('mgmt_port') is None:
|
||||
# Required ports could not be created
|
||||
return
|
||||
vm_instance = self._svc_vm_mgr.dispatch_service_vm(
|
||||
context, 'CSR1kv_nrouter', cfg.CONF.hosting_devices.csr1kv_image,
|
||||
cfg.CONF.hosting_devices.csr1kv_flavor, hosting_device_drv,
|
||||
res['mgmt_port'], res.get('ports'))
|
||||
with context.session.begin(subtransactions=True):
|
||||
if vm_instance is not None:
|
||||
dev_data.update(
|
||||
{'id': vm_instance['id'],
|
||||
'management_port_id': res['mgmt_port']['id']})
|
||||
hosting_device = self._create_hosting_device(
|
||||
context, {'hosting_device': dev_data})
|
||||
else:
|
||||
# Fundamental error like could not contact Nova
|
||||
# Cleanup anything we created
|
||||
plugging_drv.delete_hosting_device_resources(
|
||||
context, self.l3_tenant_id(), **res)
|
||||
return
|
||||
LOG.info(_LI('Created a CSR1kv hosting device VM'))
|
||||
return hosting_device
|
||||
|
||||
def _delete_service_vm_hosting_device(self, context, hosting_device):
|
||||
"""Deletes a <hosting_device> service VM.
|
||||
|
||||
This will indirectly make all of its hosted resources unscheduled.
|
||||
"""
|
||||
if hosting_device is None:
|
||||
return
|
||||
plugging_drv = self.get_hosting_device_plugging_driver()
|
||||
if plugging_drv is None:
|
||||
return
|
||||
res = plugging_drv.get_hosting_device_resources(
|
||||
context, hosting_device['id'], hosting_device['complementary_id'],
|
||||
self.l3_tenant_id(), self.mgmt_nw_id())
|
||||
if not self._svc_vm_mgr.delete_service_vm(context,
|
||||
hosting_device['id']):
|
||||
LOG.error(_LE('Failed to delete hosting device %s service VM. '
|
||||
'Will un-register it anyway.'),
|
||||
hosting_device['id'])
|
||||
plugging_drv.delete_hosting_device_resources(
|
||||
context, self.l3_tenant_id(), **res)
|
||||
with context.session.begin(subtransactions=True):
|
||||
context.session.delete(hosting_device)
|
||||
|
||||
def _create_hosting_device(self, context, hosting_device):
|
||||
LOG.debug('create_hosting_device() called')
|
||||
hd = hosting_device['hosting_device']
|
||||
tenant_id = self._get_tenant_id_for_create(context, hd)
|
||||
with context.session.begin(subtransactions=True):
|
||||
hd_db = l3_models.HostingDevice(
|
||||
id=hd.get('id') or uuidutils.generate_uuid(),
|
||||
complementary_id=hd.get('complementary_id'),
|
||||
tenant_id=tenant_id,
|
||||
device_id=hd.get('device_id'),
|
||||
admin_state_up=hd.get('admin_state_up', True),
|
||||
management_port_id=hd['management_port_id'],
|
||||
protocol_port=hd.get('protocol_port'),
|
||||
cfg_agent_id=hd.get('cfg_agent_id'),
|
||||
created_at=hd.get('created_at', timeutils.utcnow()),
|
||||
status=hd.get('status', svc_constants.ACTIVE))
|
||||
context.session.add(hd_db)
|
||||
return hd_db
|
||||
|
||||
def _select_cfgagent(self, context, hosting_device):
|
||||
"""Selects Cisco cfg agent that will configure <hosting_device>."""
|
||||
if not hosting_device:
|
||||
LOG.debug('Hosting device to schedule not specified')
|
||||
return
|
||||
elif hosting_device.cfg_agent:
|
||||
LOG.debug('Hosting device %(hd_id)s has already been '
|
||||
'assigned to Cisco cfg agent %(agent_id)s',
|
||||
{'hd_id': id,
|
||||
'agent_id': hosting_device.cfg_agent.id})
|
||||
return
|
||||
with context.session.begin(subtransactions=True):
|
||||
active_cfg_agents = self._get_cfg_agents(context, active=True)
|
||||
if not active_cfg_agents:
|
||||
LOG.warning(_LW('There are no active Cisco cfg agents'))
|
||||
# No worries, once a Cisco cfg agent is started and
|
||||
# announces itself any "dangling" hosting devices
|
||||
# will be scheduled to it.
|
||||
return
|
||||
chosen_agent = random.choice(active_cfg_agents)
|
||||
hosting_device.cfg_agent = chosen_agent
|
||||
context.session.add(hosting_device)
|
||||
return chosen_agent
|
||||
|
||||
def _get_cfg_agents(self, context, active=None, filters=None):
|
||||
query = context.session.query(agents_db.Agent)
|
||||
query = query.filter(
|
||||
agents_db.Agent.agent_type == c_constants.AGENT_TYPE_CFG)
|
||||
if active is not None:
|
||||
query = (query.filter(agents_db.Agent.admin_state_up == active))
|
||||
if filters:
|
||||
for key, value in filters.iteritems():
|
||||
column = getattr(agents_db.Agent, key, None)
|
||||
if column:
|
||||
query = query.filter(column.in_(value))
|
||||
cfg_agents = query.all()
|
||||
if active is not None:
|
||||
cfg_agents = [cfg_agent for cfg_agent in cfg_agents
|
||||
if not self.is_agent_down(
|
||||
cfg_agent['heartbeat_timestamp'])]
|
||||
return cfg_agents
|
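# --- Illustrative sketch (not part of the removed file) ---------------------
# The cfg-agent liveness checks above boil down to comparing the agent's last
# heartbeat against a configured timeout with oslo_utils.timeutils. A minimal
# standalone version of that test, assuming a 60-second timeout (the removed
# code reads the value from cfg.CONF.general.cfg_agent_down_time):
from oslo_utils import timeutils


def _example_agent_is_down(heartbeat_timestamp, timeout_seconds=60):
    # True when the last heartbeat is older than the timeout window.
    return timeutils.is_older_than(heartbeat_timestamp, timeout_seconds)
# -----------------------------------------------------------------------------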
@ -1,629 +0,0 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

from oslo_concurrency import lockutils
from oslo_config import cfg
from sqlalchemy.orm import exc
from sqlalchemy.orm import joinedload
from sqlalchemy.sql import expression as expr

from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron import context as n_context
from neutron.db import agents_db
from neutron.db import extraroute_db
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import portbindings_db as p_binding
from neutron.extensions import providernet as pr_net
from neutron.i18n import _LE, _LI
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.cisco.common import cisco_constants as c_const
from neutron.plugins.cisco.db.l3 import l3_models
from neutron.plugins.cisco.l3.rpc import l3_router_rpc_joint_agent_api

LOG = logging.getLogger(__name__)


ROUTER_APPLIANCE_OPTS = [
    cfg.IntOpt('backlog_processing_interval',
               default=10,
               help=_('Time in seconds between renewed scheduling attempts of '
                      'non-scheduled routers.')),
]

cfg.CONF.register_opts(ROUTER_APPLIANCE_OPTS, "general")


class RouterCreateInternalError(n_exc.NeutronException):
    message = _("Router could not be created due to internal error.")


class RouterInternalError(n_exc.NeutronException):
    message = _("Internal error during router processing.")


class RouterBindingInfoError(n_exc.NeutronException):
    message = _("Could not get binding information for router %(router_id)s.")


class L3RouterApplianceDBMixin(extraroute_db.ExtraRoute_dbonly_mixin):
    """Mixin class implementing Neutron's routing service using appliances."""

    # Dictionary of routers for which new scheduling attempts should
    # be made and the refresh setting and heartbeat for that.
    _backlogged_routers = {}
    _refresh_router_backlog = True
    _heartbeat = None

    @property
    def l3_cfg_rpc_notifier(self):
        if not hasattr(self, '_l3_cfg_rpc_notifier'):
            self._l3_cfg_rpc_notifier = (l3_router_rpc_joint_agent_api.
                                         L3RouterJointAgentNotifyAPI(self))
        return self._l3_cfg_rpc_notifier

    @l3_cfg_rpc_notifier.setter
    def l3_cfg_rpc_notifier(self, value):
        self._l3_cfg_rpc_notifier = value

    def create_router(self, context, router):
        with context.session.begin(subtransactions=True):
            if self.mgmt_nw_id() is None:
                raise RouterCreateInternalError()
            router_created = (super(L3RouterApplianceDBMixin, self).
                              create_router(context, router))
            r_hd_b_db = l3_models.RouterHostingDeviceBinding(
                router_id=router_created['id'],
                auto_schedule=True,
                hosting_device_id=None)
            context.session.add(r_hd_b_db)
        # backlog so this new router gets scheduled asynchronously
        self.backlog_router(r_hd_b_db['router'])
        return router_created

    def update_router(self, context, id, router):
        r = router['router']
        # Check if external gateway has changed so we may have to
        # update trunking
        o_r_db = self._get_router(context, id)
        old_ext_gw = (o_r_db.gw_port or {}).get('network_id')
        new_ext_gw = (r.get('external_gateway_info', {}) or {}).get(
            'network_id')
        with context.session.begin(subtransactions=True):
            e_context = context.elevated()
            if old_ext_gw is not None and old_ext_gw != new_ext_gw:
                o_r = self._make_router_dict(o_r_db, process_extensions=False)
                # no need to schedule now since we're only doing this to
                # tear-down connectivity and there won't be any if not
                # already scheduled.
                self._add_type_and_hosting_device_info(e_context, o_r,
                                                       schedule=False)
                p_drv = self.get_hosting_device_plugging_driver()
                if p_drv is not None:
                    p_drv.teardown_logical_port_connectivity(e_context,
                                                             o_r_db.gw_port)
            router_updated = (
                super(L3RouterApplianceDBMixin, self).update_router(
                    context, id, router))
            routers = [copy.deepcopy(router_updated)]
            self._add_type_and_hosting_device_info(e_context, routers[0])
        self.l3_cfg_rpc_notifier.routers_updated(context, routers)
        return router_updated

    def delete_router(self, context, id):
        router_db = self._get_router(context, id)
        router = self._make_router_dict(router_db)
        with context.session.begin(subtransactions=True):
            e_context = context.elevated()
            r_hd_binding = self._get_router_binding_info(e_context, id)
            self._add_type_and_hosting_device_info(
                e_context, router, binding_info=r_hd_binding, schedule=False)
            if router_db.gw_port is not None:
                p_drv = self.get_hosting_device_plugging_driver()
                if p_drv is not None:
                    p_drv.teardown_logical_port_connectivity(e_context,
                                                             router_db.gw_port)
            # conditionally remove router from backlog just to be sure
            self.remove_router_from_backlog(id)
            if router['hosting_device'] is not None:
                self.unschedule_router_from_hosting_device(context,
                                                           r_hd_binding)
            super(L3RouterApplianceDBMixin, self).delete_router(context, id)
        self.l3_cfg_rpc_notifier.router_deleted(context, router)

    def notify_router_interface_action(
            self, context, router_interface_info, routers, action):
        l3_method = '%s_router_interface' % action
        self.l3_cfg_rpc_notifier.routers_updated(context, routers, l3_method)

        mapping = {'add': 'create', 'remove': 'delete'}
        notifier = n_rpc.get_notifier('network')
        router_event = 'router.interface.%s' % mapping[action]
        notifier.info(context, router_event,
                      {'router_interface': router_interface_info})

    def add_router_interface(self, context, router_id, interface_info):
        with context.session.begin(subtransactions=True):
            info = (super(L3RouterApplianceDBMixin, self).
                    add_router_interface(context, router_id, interface_info))
            routers = [self.get_router(context, router_id)]
            self._add_type_and_hosting_device_info(context.elevated(),
                                                   routers[0])
        self.notify_router_interface_action(context, info, routers, 'add')
        return info

    def remove_router_interface(self, context, router_id, interface_info):
        if 'port_id' in (interface_info or {}):
            port_db = self._core_plugin._get_port(
                context, interface_info['port_id'])
        elif 'subnet_id' in (interface_info or {}):
            subnet_db = self._core_plugin._get_subnet(
                context, interface_info['subnet_id'])
            port_db = self._get_router_port_db_on_subnet(
                context, router_id, subnet_db)
        else:
            msg = _("Either subnet_id or port_id must be specified")
            raise n_exc.BadRequest(resource='router', msg=msg)
        routers = [self.get_router(context, router_id)]
        with context.session.begin(subtransactions=True):
            e_context = context.elevated()
            self._add_type_and_hosting_device_info(e_context, routers[0])
            p_drv = self.get_hosting_device_plugging_driver()
            if p_drv is not None:
                p_drv.teardown_logical_port_connectivity(e_context, port_db)
            info = (super(L3RouterApplianceDBMixin, self).
                    remove_router_interface(context, router_id,
                                            interface_info))
        self.notify_router_interface_action(context, info, routers, 'remove')
        return info

    def create_floatingip(
            self, context, floatingip,
            initial_status=l3_constants.FLOATINGIP_STATUS_ACTIVE):
        with context.session.begin(subtransactions=True):
            info = super(L3RouterApplianceDBMixin, self).create_floatingip(
                context, floatingip)
            if info['router_id']:
                routers = [self.get_router(context, info['router_id'])]
                self._add_type_and_hosting_device_info(context.elevated(),
                                                       routers[0])
                self.l3_cfg_rpc_notifier.routers_updated(context, routers,
                                                         'create_floatingip')
        return info

    def update_floatingip(self, context, id, floatingip):
        orig_fl_ip = super(L3RouterApplianceDBMixin, self).get_floatingip(
            context, id)
        before_router_id = orig_fl_ip['router_id']
        with context.session.begin(subtransactions=True):
            info = super(L3RouterApplianceDBMixin, self).update_floatingip(
                context, id, floatingip)
            router_ids = []
            if before_router_id:
                router_ids.append(before_router_id)
            router_id = info['router_id']
            if router_id and router_id != before_router_id:
                router_ids.append(router_id)
            routers = []
            for router_id in router_ids:
                router = self.get_router(context, router_id)
                self._add_type_and_hosting_device_info(context.elevated(),
                                                       router)
                routers.append(router)
        self.l3_cfg_rpc_notifier.routers_updated(context, routers,
                                                 'update_floatingip')
        return info

    def delete_floatingip(self, context, id):
        floatingip_db = self._get_floatingip(context, id)
        router_id = floatingip_db['router_id']
        with context.session.begin(subtransactions=True):
            super(L3RouterApplianceDBMixin, self).delete_floatingip(
                context, id)
            if router_id:
                routers = [self.get_router(context, router_id)]
                self._add_type_and_hosting_device_info(context.elevated(),
                                                       routers[0])
                self.l3_cfg_rpc_notifier.routers_updated(context, routers,
                                                         'delete_floatingip')

    def disassociate_floatingips(self, context, port_id, do_notify=True):
        with context.session.begin(subtransactions=True):
            router_ids = super(L3RouterApplianceDBMixin,
                               self).disassociate_floatingips(context, port_id)
            if router_ids and do_notify:
                routers = []
                for router_id in router_ids:
                    router = self.get_router(context, router_id)
                    self._add_type_and_hosting_device_info(context.elevated(),
                                                           router)
                    routers.append(router)
                self.l3_cfg_rpc_notifier.routers_updated(
                    context, routers, 'disassociate_floatingips')
                # since caller assumes that we handled notifications on its
                # behalf, return nothing
                return
            return router_ids

    @lockutils.synchronized('routerbacklog', 'neutron-')
    def _handle_non_responding_hosting_devices(self, context, hosting_devices,
                                               affected_resources):
        """Handle hosting devices determined to be "dead".

        This function is called by the hosting device manager.
        Service plugins are supposed to extend the 'affected_resources'
        dictionary. Hence, we add the id of Neutron routers that are
        hosted in <hosting_devices>.

        param: hosting_devices - list of dead hosting devices
        param: affected_resources - dict with list of affected logical
                                    resources per hosting device:
             {'hd_id1': {'routers': [id1, id2, ...],
                         'fw': [id1, ...],
                         ...},
              'hd_id2': {'routers': [id3, id4, ...],
                         'fw': [id1, ...],
                         ...},
              ...}
        """
        LOG.debug('Processing affected routers in dead hosting devices')
        with context.session.begin(subtransactions=True):
            for hd in hosting_devices:
                hd_bindings = self._get_hosting_device_bindings(context,
                                                                hd['id'])
                router_ids = []
                for binding in hd_bindings:
                    router_ids.append(binding['router_id'])
                    if binding['auto_schedule']:
                        self.backlog_router(binding['router'])
                try:
                    affected_resources[hd['id']].update(
                        {'routers': router_ids})
                except KeyError:
                    affected_resources[hd['id']] = {'routers': router_ids}

    def get_sync_data_ext(self, context, router_ids=None, active=None):
        """Query routers and their related floating_ips, interfaces.

        Adds information about hosting device as well as trunking.
        """
        with context.session.begin(subtransactions=True):
            sync_data = (super(L3RouterApplianceDBMixin, self).
                         get_sync_data(context, router_ids, active))
            for router in sync_data:
                self._add_type_and_hosting_device_info(context, router)
                plg_drv = self.get_hosting_device_plugging_driver()
                if plg_drv and router['hosting_device']:
                    self._add_hosting_port_info(context, router, plg_drv)
        return sync_data

    def schedule_router_on_hosting_device(self, context, r_hd_binding):
        LOG.info(_LI('Attempting to schedule router %s.'),
                 r_hd_binding['router']['id'])
        result = self._create_csr1kv_vm_hosting_device(context.elevated())
        if result is None:
            # CSR1kv hosting device creation was unsuccessful so backlog
            # it for another scheduling attempt later.
            self.backlog_router(r_hd_binding['router'])
            return False
        with context.session.begin(subtransactions=True):
            router = r_hd_binding['router']
            r_hd_binding.hosting_device = result
            self.remove_router_from_backlog(router['id'])
            LOG.info(_LI('Successfully scheduled router %(r_id)s to '
                         'hosting device %(d_id)s'),
                     {'r_id': r_hd_binding['router']['id'],
                      'd_id': result['id']})
        return True

    def unschedule_router_from_hosting_device(self, context, r_hd_binding):
        LOG.info(_LI('Un-schedule router %s.'),
                 r_hd_binding['router']['id'])
        hosting_device = r_hd_binding['hosting_device']
        if r_hd_binding['hosting_device'] is None:
            return False
        self._delete_service_vm_hosting_device(context.elevated(),
                                               hosting_device)

    @lockutils.synchronized('routers', 'neutron-')
    def backlog_router(self, router):
        if ((router or {}).get('id') is None or
                router['id'] in self._backlogged_routers):
            return
        LOG.info(_LI('Backlogging router %s for renewed scheduling attempt '
                     'later'), router['id'])
        self._backlogged_routers[router['id']] = router

    @lockutils.synchronized('routers', 'neutron-')
    def remove_router_from_backlog(self, id):
        self._backlogged_routers.pop(id, None)
        LOG.info(_LI('Router %s removed from backlog'), id)

    @lockutils.synchronized('routerbacklog', 'neutron-')
    def _process_backlogged_routers(self):
        if self._refresh_router_backlog:
            self._sync_router_backlog()
        if not self._backlogged_routers:
            return
        context = n_context.get_admin_context()
        scheduled_routers = []
        LOG.info(_LI('Processing router (scheduling) backlog'))
        # try to reschedule
        for r_id, router in self._backlogged_routers.items():
            self._add_type_and_hosting_device_info(context, router)
            if router.get('hosting_device'):
                # scheduling attempt succeeded
                scheduled_routers.append(router)
                self._backlogged_routers.pop(r_id, None)
        # notify cfg agents so the scheduled routers are instantiated
        if scheduled_routers:
            self.l3_cfg_rpc_notifier.routers_updated(context,
                                                     scheduled_routers)

    def _setup_backlog_handling(self):
        self._heartbeat = loopingcall.FixedIntervalLoopingCall(
            self._process_backlogged_routers)
        self._heartbeat.start(
            interval=cfg.CONF.general.backlog_processing_interval)

    def _sync_router_backlog(self):
        LOG.info(_LI('Synchronizing router (scheduling) backlog'))
        context = n_context.get_admin_context()
        query = context.session.query(l3_models.RouterHostingDeviceBinding)
        query = query.options(joinedload('router'))
        query = query.filter(
            l3_models.RouterHostingDeviceBinding.hosting_device_id ==
            expr.null())
        for binding in query:
            router = self._make_router_dict(binding.router,
                                            process_extensions=False)
            self._backlogged_routers[binding.router_id] = router
        self._refresh_router_backlog = False

    def _get_router_binding_info(self, context, id, load_hd_info=True):
        query = context.session.query(l3_models.RouterHostingDeviceBinding)
        if load_hd_info:
            query = query.options(joinedload('hosting_device'))
        query = query.filter(l3_models.RouterHostingDeviceBinding.router_id ==
                             id)
        try:
            return query.one()
        except exc.NoResultFound:
            # This should not happen
            LOG.error(_LE('DB inconsistency: No type and hosting info '
                          'associated with router %s'), id)
            raise RouterBindingInfoError(router_id=id)
        except exc.MultipleResultsFound:
            # This should not happen either
            LOG.error(_LE('DB inconsistency: Multiple type and hosting info '
                          'associated with router %s'), id)
            raise RouterBindingInfoError(router_id=id)

    def _get_hosting_device_bindings(self, context, id, load_routers=False,
                                     load_hosting_device=False):
        query = context.session.query(l3_models.RouterHostingDeviceBinding)
        if load_routers:
            query = query.options(joinedload('router'))
        if load_hosting_device:
            query = query.options(joinedload('hosting_device'))
        query = query.filter(
            l3_models.RouterHostingDeviceBinding.hosting_device_id == id)
        return query.all()

    def _add_type_and_hosting_device_info(self, context, router,
                                          binding_info=None, schedule=True):
        """Adds type and hosting device information to a router."""
        try:
            if binding_info is None:
                binding_info = self._get_router_binding_info(context,
                                                             router['id'])
        except RouterBindingInfoError:
            LOG.error(_LE('DB inconsistency: No hosting info associated with '
                          'router %s'), router['id'])
            router['hosting_device'] = None
            return
        router['router_type'] = {
            'id': None,
            'name': 'CSR1kv_router',
            'cfg_agent_driver': (cfg.CONF.hosting_devices
                                 .csr1kv_cfgagent_router_driver)}
        if binding_info.hosting_device is None and schedule:
            # This router has not been scheduled to a hosting device
            # so we try to do it now.
            self.schedule_router_on_hosting_device(context, binding_info)
            context.session.expire(binding_info)
        if binding_info.hosting_device is None:
            router['hosting_device'] = None
        else:
            router['hosting_device'] = self.get_device_info_for_agent(
                binding_info.hosting_device)

    def _add_hosting_port_info(self, context, router, plugging_driver):
        """Adds hosting port information to router ports.

        We only populate hosting port info, i.e., reach here, if the
        router has been scheduled to a hosting device. Hence this is
        a good place to allocate hosting ports to the router ports.
        """
        # cache of hosting port information: {mac_addr: {'name': port_name}}
        hosting_pdata = {}
        if router['external_gateway_info'] is not None:
            h_info, did_allocation = self._populate_hosting_info_for_port(
                context, router['id'], router['gw_port'],
                router['hosting_device'], hosting_pdata, plugging_driver)
        for itfc in router.get(l3_constants.INTERFACE_KEY, []):
            h_info, did_allocation = self._populate_hosting_info_for_port(
                context, router['id'], itfc, router['hosting_device'],
                hosting_pdata, plugging_driver)

    def _populate_hosting_info_for_port(self, context, router_id, port,
                                        hosting_device, hosting_pdata,
                                        plugging_driver):
        port_db = self._core_plugin._get_port(context, port['id'])
        h_info = port_db.hosting_info
        new_allocation = False
        if h_info is None:
            # The port does not yet have a hosting port so allocate one now
            h_info = self._allocate_hosting_port(
                context, router_id, port_db, hosting_device['id'],
                plugging_driver)
            if h_info is None:
                # This should not happen but just in case ...
                port['hosting_info'] = None
                return None, new_allocation
            else:
                new_allocation = True
        if hosting_pdata.get('mac') is None:
            p_data = self._core_plugin.get_port(
                context, h_info.hosting_port_id, ['mac_address', 'name'])
            hosting_pdata['mac'] = p_data['mac_address']
            hosting_pdata['name'] = p_data['name']
        # Including MAC address of hosting port so L3CfgAgent can easily
        # determine which VM VIF to configure VLAN sub-interface on.
        port['hosting_info'] = {'hosting_port_id': h_info.hosting_port_id,
                                'hosting_mac': hosting_pdata.get('mac'),
                                'hosting_port_name': hosting_pdata.get('name')}
        plugging_driver.extend_hosting_port_info(
            context, port_db, port['hosting_info'])
        return h_info, new_allocation

    def _allocate_hosting_port(self, context, router_id, port_db,
                               hosting_device_id, plugging_driver):
        net_data = self._core_plugin.get_network(
            context, port_db['network_id'], [pr_net.NETWORK_TYPE])
        network_type = net_data.get(pr_net.NETWORK_TYPE)
        alloc = plugging_driver.allocate_hosting_port(
            context, router_id, port_db, network_type, hosting_device_id)
        if alloc is None:
            LOG.error(_LE('Failed to allocate hosting port for port %s'),
                      port_db['id'])
            return
        with context.session.begin(subtransactions=True):
            h_info = l3_models.HostedHostingPortBinding(
                logical_resource_id=router_id,
                logical_port_id=port_db['id'],
                network_type=network_type,
                hosting_port_id=alloc['allocated_port_id'],
                segmentation_id=alloc['allocated_vlan'])
            context.session.add(h_info)
            context.session.expire(port_db)
        # allocation succeeded so establish connectivity for logical port
        context.session.expire(h_info)
        plugging_driver.setup_logical_port_connectivity(context, port_db)
        return h_info

    def _get_router_port_db_on_subnet(self, context, router_id, subnet):
        try:
            rport_qry = context.session.query(models_v2.Port)
            ports = rport_qry.filter_by(
                device_id=router_id,
                device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
                network_id=subnet['network_id'])
            for p in ports:
                if p['fixed_ips'][0]['subnet_id'] == subnet['id']:
                    return p
        except exc.NoResultFound:
            return

    def list_active_sync_routers_on_hosting_devices(self, context, host,
                                                    router_ids=None,
                                                    hosting_device_ids=None):
        agent = self._get_agent_by_type_and_host(
            context, c_const.AGENT_TYPE_CFG, host)
        if not agent.admin_state_up:
            return []
        query = context.session.query(
            l3_models.RouterHostingDeviceBinding.router_id)
        query = query.join(l3_models.HostingDevice)
        query = query.filter(l3_models.HostingDevice.cfg_agent_id == agent.id)
        if router_ids:
            if len(router_ids) == 1:
                query = query.filter(
                    l3_models.RouterHostingDeviceBinding.router_id ==
                    router_ids[0])
            else:
                query = query.filter(
                    l3_models.RouterHostingDeviceBinding.router_id.in_(
                        router_ids))
        if hosting_device_ids:
            if len(hosting_device_ids) == 1:
                query = query.filter(
                    l3_models.RouterHostingDeviceBinding.hosting_device_id ==
                    hosting_device_ids[0])
            elif len(hosting_device_ids) > 1:
                query = query.filter(
                    l3_models.RouterHostingDeviceBinding.hosting_device_id.in_(
                        hosting_device_ids))
        router_ids = [item[0] for item in query]
        if router_ids:
            return self.get_sync_data_ext(context, router_ids=router_ids,
                                          active=True)
        else:
            return []

    def get_active_routers_for_host(self, context, host):
        query = context.session.query(
            l3_models.RouterHostingDeviceBinding.router_id)
        query = query.join(
            models_v2.Port,
            l3_models.RouterHostingDeviceBinding.hosting_device_id ==
            models_v2.Port.device_id)
        query = query.join(p_binding.PortBindingPort)
        query = query.filter(p_binding.PortBindingPort.host == host)
        query = query.filter(models_v2.Port.name == 'mgmt')
        router_ids = [item[0] for item in query]
        if router_ids:
            return self.get_sync_data_ext(context, router_ids=router_ids,
                                          active=True)
        else:
            return []

    @staticmethod
    def _agent_state_filter(check_active, last_heartbeat):
        """Filters only active agents, if requested."""
        if not check_active:
            return True
        return not agents_db.AgentDbMixin.is_agent_down(last_heartbeat)

    def get_host_for_router(self, context, router, admin_state_up=None,
                            check_active=False):
        query = context.session.query(agents_db.Agent.host,
                                      agents_db.Agent.heartbeat_timestamp)
        query = query.join(
            p_binding.PortBindingPort,
            p_binding.PortBindingPort.host == agents_db.Agent.host)
        query = query.join(
            models_v2.Port,
            models_v2.Port.id == p_binding.PortBindingPort.port_id)
        query = query.join(
            l3_models.RouterHostingDeviceBinding,
            l3_models.RouterHostingDeviceBinding.hosting_device_id ==
            models_v2.Port.device_id)
        query = query.filter(
            agents_db.Agent.topic == topics.L3_AGENT,
            l3_models.RouterHostingDeviceBinding.router_id == router)
        if admin_state_up is not None:
            query = query.filter(
                agents_db.Agent.admin_state_up == admin_state_up)
        entry = query.first()
        if entry and L3RouterApplianceDBMixin._agent_state_filter(check_active,
                                                                  entry[1]):
            return entry[0]
        return ""
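# --- Illustrative sketch (not part of the removed file) ---------------------
# The backlog handling above is driven by a fixed-interval loop: a
# FixedIntervalLoopingCall periodically invokes the backlog processor. The
# same wiring in isolation, shown here with oslo_service's loopingcall (the
# removed code imports the equivalent module from neutron.openstack.common):
from oslo_service import loopingcall


def _example_start_backlog_loop(process_backlog_fn, interval_seconds=10):
    # interval_seconds mirrors cfg.CONF.general.backlog_processing_interval,
    # which defaults to 10 in the option registered at the top of this file.
    loop = loopingcall.FixedIntervalLoopingCall(process_backlog_fn)
    loop.start(interval=interval_seconds)
    return loop
# -----------------------------------------------------------------------------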
@ -1,56 +0,0 @@
hostname csr

alias interface ns no shutdown
alias interface i do show ip interface brief
alias interface s do show running-config
alias configure i do show ip interface brief
alias configure s do show running-config
alias exec s sh run
alias exec c conf t
alias exec i sh ip int brie
alias exec sr sh ip ro

line con 0
logging synchronous
transport preferred none

line vty 0 4
login local
transport preferred none
transport input ssh

username stack priv 15 secret cisco

ip domain name mydomain.org
crypto key generate rsa modulus 1024

ip ssh version 2
ip ssh pubkey-chain
username stack
key-string
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDipwLBYeYbqBLpmQ8gIO65Dx23SGcRR7W+ixnh14qORWNYiXih1zUGGbBcCAFuTkySSt/aQqMCx3AA47SKnqjSuaudHcoFLCAWTvPYMJIXvsCFMqs3BPR/3t0ak5J3ZDpqL8V+Bcw8crdl7SyAHm/k6ShHHZXNxVMUAtDVu5PDCZVIy7qo2GBEMIynaDrRQXp6vWZkK53Y5lHLCELYWilMv5XYgf/qDXXrJg2wxnIxGa02wek36h+39SMPY1jKsYIF+Tjp36jmf0iyRasiXGEvyGkKSQzKlkDV66zgNu+QQ/W1fTfbx7pIQjQplmv/b6vyRWjyObIza6wjYUhHrLQ1 stack@openstack1
exit

netconf max-sessions 16
netconf ssh


interface GigabitEthernet1
ip address <ip> <mask>
no shutdown

virtual-service csr_mgmt
ip shared host-interface GigabitEthernet1
activate

remote-management
restful-api local-port 55443

ip route 0.0.0.0 0.0.0.0 GigabitEthernet1 <gw>
ip name-server <name_server>

license accept end user agreement
license boot level premium
@ -1,52 +0,0 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

import six


@six.add_metaclass(abc.ABCMeta)
class HostingDeviceDriver(object):
    """This class defines the API for hosting device drivers.

    These are used by Cisco (routing service) plugin to perform
    various (plugin independent) operations on hosting devices.
    """

    @abc.abstractmethod
    def hosting_device_name(self):
        pass

    @abc.abstractmethod
    def create_config(self, context, mgmtport):
        """Creates configuration(s) for a service VM.

        This function can be used to make initial configurations. The
        configuration(s) is/are injected in the VM's file system using
        Nova's configdrive feature.

        Called when a service VM-based hosting device is to be created.
        This function should cleanup after itself in case of error.

        returns: Dict with filenames and their corresponding content strings:
                 {filename1: content_string1, filename2: content_string2, ...}
                 The file system of the VM will contain files with the
                 specified filenames and content. If the dict is empty no
                 configdrive will be used.

        :param context: neutron api request context.
        :param mgmt_port: management port for the hosting device.
        """
        pass
@ -1,75 +0,0 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr

from oslo_config import cfg
from oslo_utils import excutils

from neutron.i18n import _LE
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.cisco.l3 import hosting_device_drivers

LOG = logging.getLogger(__name__)


# Length mgmt port UUID to be part of VM's config drive filename
CFG_DRIVE_UUID_START = 24
CFG_DRIVE_UUID_LEN = 12

CSR1KV_HD_DRIVER_OPTS = [
    cfg.StrOpt('csr1kv_configdrive_template', default='csr1kv_cfg_template',
               help=_("CSR1kv configdrive template file.")),
]

cfg.CONF.register_opts(CSR1KV_HD_DRIVER_OPTS, "hosting_devices")


class CSR1kvHostingDeviceDriver(hosting_device_drivers.HostingDeviceDriver):

    def hosting_device_name(self):
        return "CSR1kv"

    def create_config(self, context, mgmtport):
        mgmt_ip = mgmtport['fixed_ips'][0]['ip_address']
        subnet_data = self._core_plugin.get_subnet(
            context, mgmtport['fixed_ips'][0]['subnet_id'],
            ['cidr', 'gateway_ip', 'dns_nameservers'])
        netmask = str(netaddr.IPNetwork(subnet_data['cidr']).netmask)
        params = {'<ip>': mgmt_ip, '<mask>': netmask,
                  '<gw>': subnet_data['gateway_ip'],
                  '<name_server>': '8.8.8.8'}
        try:
            cfg_template_filename = (
                cfg.CONF.general.templates_path + "/" +
                cfg.CONF.hosting_devices.csr1kv_configdrive_template)
            vm_cfg_data = ''
            with open(cfg_template_filename, 'r') as cfg_template_file:
                # insert proper instance values in the template
                for line in cfg_template_file:
                    tokens = line.strip('\n').split(' ')
                    line = ' '.join(map(lambda x: params.get(x, x),
                                        tokens)) + '\n'
                    vm_cfg_data += line
            return {'iosxe_config.txt': vm_cfg_data}
        except IOError:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Failed to create config file. Trying to '
                                  'clean up.'))
                self.delete_configdrive_files(context, mgmtport)

    @property
    def _core_plugin(self):
        return manager.NeutronManager.get_plugin()
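# --- Illustrative sketch (not part of the removed file) ---------------------
# create_config() above fills the <ip>, <mask>, <gw> and <name_server>
# placeholders by splitting each template line on spaces and mapping tokens
# through the params dict. The same substitution, in isolation:
def _example_fill_template(template_text, params):
    filled_lines = []
    for line in template_text.splitlines():
        tokens = line.split(' ')
        filled_lines.append(' '.join(params.get(tok, tok) for tok in tokens))
    return '\n'.join(filled_lines) + '\n'


# e.g. _example_fill_template('ip address <ip> <mask>',
#                             {'<ip>': '10.0.100.5',
#                              '<mask>': '255.255.255.0'})
# returns 'ip address 10.0.100.5 255.255.255.0\n'.
# -----------------------------------------------------------------------------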
@ -1,147 +0,0 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

import six


@six.add_metaclass(abc.ABCMeta)
class PluginSidePluggingDriver(object):
    """This class defines the API for plugging drivers.

    These are used by Cisco (routing service) plugin to perform
    various operations on the logical ports of logical (service) resources
    in a plugin compatible way.
    """

    @abc.abstractmethod
    def create_hosting_device_resources(self, context, complementary_id,
                                        tenant_id, mgmt_nw_id,
                                        mgmt_sec_grp_id, max_hosted):
        """Create resources for a hosting device in a plugin specific way.

        Called when a hosting device is to be created so resources like
        networks and ports can be created for it in a plugin compatible
        way. This is primarily useful to service VMs.

        returns: a dict {'mgmt_port': <mgmt port or None>,
                         'ports': <list of ports>,
                         ... arbitrary driver items }

        :param context: Neutron api request context.
        :param complementary_id: complementary id of hosting device
        :param tenant_id: id of tenant owning the hosting device resources.
        :param mgmt_nw_id: id of management network for hosting devices.
        :param mgmt_sec_grp_id: id of security group for management network.
        :param max_hosted: maximum number of logical resources.
        """
        pass

    @abc.abstractmethod
    def get_hosting_device_resources(self, context, id, complementary_id,
                                     tenant_id, mgmt_nw_id):
        """Returns information about all resources for a hosting device.

        Called just before a hosting device is to be deleted so that
        information about the resources the hosting device uses can be
        collected.

        returns: a dict {'mgmt_port': <mgmt port or None>,
                         'ports': <list of ports>,
                         ... arbitrary driver items }

        :param context: Neutron api request context.
        :param id: id of hosting device.
        :param complementary_id: complementary id of hosting device
        :param tenant_id: id of tenant owning the hosting device resources.
        :param mgmt_nw_id: id of management network for hosting devices.
        """
        pass

    @abc.abstractmethod
    def delete_hosting_device_resources(self, context, tenant_id, mgmt_port,
                                        **kwargs):
        """Deletes resources for a hosting device in a plugin specific way.

        Called when a hosting device has been deleted (or when its creation
        has failed) so resources like networks and ports can be deleted in
        a plugin compatible way. This is primarily useful to service VMs.

        :param context: Neutron api request context.
        :param tenant_id: id of tenant owning the hosting device resources.
        :param mgmt_port: id of management port for the hosting device.
        :param kwargs: dictionary for any driver specific parameters.
        """
        pass

    @abc.abstractmethod
    def setup_logical_port_connectivity(self, context, port_db):
        """Establishes connectivity for a logical port.

        Performs the configuration tasks needed in the infrastructure
        to establish connectivity for a logical port.

        :param context: Neutron api request context.
        :param port_db: Neutron port that has been created.
        """
        pass

    @abc.abstractmethod
    def teardown_logical_port_connectivity(self, context, port_db):
        """Removes connectivity for a logical port.

        Performs the configuration tasks needed in the infrastructure
        to disconnect a logical port.

        Example: Remove a VLAN that is trunked to a service VM.

        :param context: Neutron api request context.
        :param port_db: Neutron port about to be deleted.
        """
        pass

    @abc.abstractmethod
    def extend_hosting_port_info(self, context, port_db, hosting_info):
        """Extends hosting information for a logical port.

        Allows a driver to add driver specific information to the
        hosting information for a logical port.

        :param context: Neutron api request context.
        :param port_db: Neutron port that hosting information concerns.
        :param hosting_info: dict with hosting port information to be extended.
        """
        pass

    @abc.abstractmethod
    def allocate_hosting_port(self, context, router_id, port_db, network_type,
                              hosting_device_id):
        """Allocates a hosting port for a logical port.

        Schedules a logical port to a hosting port. Note that the hosting port
        may be the logical port itself.

        returns: a dict {'allocated_port_id': <id of allocated port>,
                         'allocated_vlan': <allocated VLAN or None>} or
                 None if allocation failed

        :param context: Neutron api request context.
        :param router_id: id of Neutron router the logical port belongs to.
        :param port_db: Neutron logical router port.
        :param network_type: Type of network for logical router port
        :param hosting_device_id: id of hosting device
        """
        pass
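# --- Illustrative sketch (not part of the removed file) ---------------------
# A concrete plugging driver only needs to subclass PluginSidePluggingDriver
# and implement every abstract method above. The no-op skeleton below is
# purely illustrative; the real in-tree implementation is the N1kv trunking
# driver in the next file.
class _ExampleNoopPluggingDriver(PluginSidePluggingDriver):
    def create_hosting_device_resources(self, context, complementary_id,
                                        tenant_id, mgmt_nw_id,
                                        mgmt_sec_grp_id, max_hosted):
        return {'mgmt_port': None, 'ports': []}

    def get_hosting_device_resources(self, context, id, complementary_id,
                                     tenant_id, mgmt_nw_id):
        return {'mgmt_port': None, 'ports': []}

    def delete_hosting_device_resources(self, context, tenant_id, mgmt_port,
                                        **kwargs):
        pass

    def setup_logical_port_connectivity(self, context, port_db):
        pass

    def teardown_logical_port_connectivity(self, context, port_db):
        pass

    def extend_hosting_port_info(self, context, port_db, hosting_info):
        pass

    def allocate_hosting_port(self, context, router_id, port_db, network_type,
                              hosting_device_id):
        # Returning None signals that allocation failed.
        return None
# -----------------------------------------------------------------------------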
@ -1,31 +0,0 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Constants for the N1kv plugging drivers.

# These prefix defines will go away when Nova allows spinning up
# VMs with vifs on networks without subnet(s).
SUBNET_PREFIX = '172.16.1.0/24'

# T1 port/network is for VXLAN
T1_PORT_NAME = 't1_p:'
# T2 port/network is for VLAN
T2_PORT_NAME = 't2_p:'
T1_NETWORK_NAME = 't1_n:'
T2_NETWORK_NAME = 't2_n:'
T1_SUBNET_NAME = 't1_sn:'
T2_SUBNET_NAME = 't2_sn:'

T1_SUBNET_START_PREFIX = '172.16.'
T2_SUBNET_START_PREFIX = '172.32.'
@ -1,508 +0,0 @@
|
||||
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import eventlet
|
||||
|
||||
from oslo_config import cfg
|
||||
from sqlalchemy.orm import exc
|
||||
from sqlalchemy.sql import expression as expr
|
||||
|
||||
from neutron.api.v2 import attributes
|
||||
from neutron.common import exceptions as n_exc
|
||||
from neutron import context as n_context
|
||||
from neutron.db import models_v2
|
||||
from neutron.extensions import providernet as pr_net
|
||||
from neutron.i18n import _LE, _LI, _LW
|
||||
from neutron import manager
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.cisco.db.l3 import l3_models
|
||||
from neutron.plugins.cisco.extensions import n1kv
|
||||
import neutron.plugins.cisco.l3.plugging_drivers as plug
|
||||
from neutron.plugins.cisco.l3.plugging_drivers import (n1kv_plugging_constants
|
||||
as n1kv_const)
|
||||
from neutron.plugins.common import constants
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
N1KV_TRUNKING_DRIVER_OPTS = [
|
||||
cfg.StrOpt('management_port_profile', default='osn_mgmt_pp',
|
||||
help=_("Name of N1kv port profile for management ports.")),
|
||||
cfg.StrOpt('t1_port_profile', default='osn_t1_pp',
|
||||
help=_("Name of N1kv port profile for T1 ports (i.e., ports "
|
||||
"carrying traffic from VXLAN segmented networks).")),
|
||||
cfg.StrOpt('t2_port_profile', default='osn_t2_pp',
|
||||
help=_("Name of N1kv port profile for T2 ports (i.e., ports "
|
||||
"carrying traffic from VLAN segmented networks).")),
|
||||
cfg.StrOpt('t1_network_profile', default='osn_t1_np',
|
||||
help=_("Name of N1kv network profile for T1 networks (i.e., "
|
||||
"trunk networks for VXLAN segmented traffic).")),
|
||||
cfg.StrOpt('t2_network_profile', default='osn_t2_np',
|
||||
help=_("Name of N1kv network profile for T2 networks (i.e., "
|
||||
"trunk networks for VLAN segmented traffic).")),
|
||||
]
|
||||
|
||||
cfg.CONF.register_opts(N1KV_TRUNKING_DRIVER_OPTS, "n1kv")
|
||||
|
||||
MIN_LL_VLAN_TAG = 10
|
||||
MAX_LL_VLAN_TAG = 200
|
||||
FULL_VLAN_SET = set(range(MIN_LL_VLAN_TAG, MAX_LL_VLAN_TAG + 1))
|
||||
DELETION_ATTEMPTS = 5
|
||||
SECONDS_BETWEEN_DELETION_ATTEMPTS = 3
|
||||
|
||||
# Port lookups can fail so retries are needed
|
||||
MAX_HOSTING_PORT_LOOKUP_ATTEMPTS = 10
|
||||
SECONDS_BETWEEN_HOSTING_PORT_LOOKSUPS = 2
|
||||
|
||||
|
||||
class N1kvTrunkingPlugDriver(plug.PluginSidePluggingDriver):
|
||||
"""Driver class for service VMs used with the N1kv plugin.
|
||||
|
||||
The driver makes use N1kv plugin's VLAN trunk feature.
|
||||
"""
|
||||
_mgmt_port_profile_id = None
|
||||
_t1_port_profile_id = None
|
||||
_t2_port_profile_id = None
|
||||
_t1_network_profile_id = None
|
||||
_t2_network_profile_id = None
|
||||
|
||||
    @property
    def _core_plugin(self):
        return manager.NeutronManager.get_plugin()

    @classmethod
    def _get_profile_id(cls, p_type, resource, name):
        try:
            tenant_id = manager.NeutronManager.get_service_plugins()[
                constants.L3_ROUTER_NAT].l3_tenant_id()
        except AttributeError:
            return
        if tenant_id is None:
            return
        core_plugin = manager.NeutronManager.get_plugin()
        if p_type == 'net_profile':
            profiles = core_plugin.get_network_profiles(
                n_context.get_admin_context(),
                {'tenant_id': [tenant_id], 'name': [name]},
                ['id'])
        else:
            profiles = core_plugin.get_policy_profiles(
                n_context.get_admin_context(),
                {'tenant_id': [tenant_id], 'name': [name]},
                ['id'])
        if len(profiles) == 1:
            return profiles[0]['id']
        elif len(profiles) > 1:
            # Profile must have a unique name.
            LOG.error(_LE('The %(resource)s %(name)s does not have a unique '
                          'name. Please refer to the admin guide and create '
                          'one.'),
                      {'resource': resource, 'name': name})
        else:
            # Profile has not been created.
            LOG.error(_LE('There is no %(resource)s %(name)s. Please refer '
                          'to the admin guide and create one.'),
                      {'resource': resource, 'name': name})

    @classmethod
    def mgmt_port_profile_id(cls):
        if cls._mgmt_port_profile_id is None:
            cls._mgmt_port_profile_id = cls._get_profile_id(
                'port_profile', 'N1kv port profile',
                cfg.CONF.n1kv.management_port_profile)
        return cls._mgmt_port_profile_id

    @classmethod
    def t1_port_profile_id(cls):
        if cls._t1_port_profile_id is None:
            cls._t1_port_profile_id = cls._get_profile_id(
                'port_profile', 'N1kv port profile',
                cfg.CONF.n1kv.t1_port_profile)
        return cls._t1_port_profile_id

    @classmethod
    def t2_port_profile_id(cls):
        if cls._t2_port_profile_id is None:
            cls._t2_port_profile_id = cls._get_profile_id(
                'port_profile', 'N1kv port profile',
                cfg.CONF.n1kv.t2_port_profile)
        return cls._t2_port_profile_id

    @classmethod
    def t1_network_profile_id(cls):
        if cls._t1_network_profile_id is None:
            cls._t1_network_profile_id = cls._get_profile_id(
                'net_profile', 'N1kv network profile',
                cfg.CONF.n1kv.t1_network_profile)
        return cls._t1_network_profile_id

    @classmethod
    def t2_network_profile_id(cls):
        if cls._t2_network_profile_id is None:
            cls._t2_network_profile_id = cls._get_profile_id(
                'net_profile', 'N1kv network profile',
                cfg.CONF.n1kv.t2_network_profile)
        return cls._t2_network_profile_id

    def create_hosting_device_resources(self, context, complementary_id,
                                        tenant_id, mgmt_nw_id,
                                        mgmt_sec_grp_id, max_hosted):
        mgmt_port = None
        t1_n, t1_sn, t2_n, t2_sn, t_p = [], [], [], [], []
        if mgmt_nw_id is not None and tenant_id is not None:
            # Create port for mgmt interface
            p_spec = {'port': {
                'tenant_id': tenant_id,
                'admin_state_up': True,
                'name': 'mgmt',
                'network_id': mgmt_nw_id,
                'mac_address': attributes.ATTR_NOT_SPECIFIED,
                'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
                'n1kv:profile_id': self.mgmt_port_profile_id(),
                'device_id': "",
                # Use device_owner attribute to ensure we can query for these
                # ports even before Nova has set device_id attribute.
                'device_owner': complementary_id}}
            try:
                mgmt_port = self._core_plugin.create_port(context,
                                                          p_spec)
                # The trunk networks
                n_spec = {'network': {'tenant_id': tenant_id,
                                      'admin_state_up': True,
                                      'name': n1kv_const.T1_NETWORK_NAME,
                                      'shared': False}}
                # Until Nova allows spinning up VMs with VIFs on
                # networks without subnet(s) we create "dummy" subnets
                # for the trunk networks
                s_spec = {'subnet': {
                    'tenant_id': tenant_id,
                    'admin_state_up': True,
                    'cidr': n1kv_const.SUBNET_PREFIX,
                    'enable_dhcp': False,
                    'gateway_ip': attributes.ATTR_NOT_SPECIFIED,
                    'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
                    'ip_version': 4,
                    'dns_nameservers': attributes.ATTR_NOT_SPECIFIED,
                    'host_routes': attributes.ATTR_NOT_SPECIFIED}}
                for i in xrange(max_hosted):
                    # Create T1 trunk network for this router
                    self._create_resources(
                        context, "T1", i, n_spec, n1kv_const.T1_NETWORK_NAME,
                        self.t1_network_profile_id(), t1_n, s_spec,
                        n1kv_const.T1_SUBNET_NAME, t1_sn, p_spec,
                        n1kv_const.T1_PORT_NAME, self.t1_port_profile_id(),
                        t_p)
                    # Create T2 trunk network for this router
                    self._create_resources(
                        context, "T2", i, n_spec, n1kv_const.T2_NETWORK_NAME,
                        self.t2_network_profile_id(), t2_n, s_spec,
                        n1kv_const.T2_SUBNET_NAME, t2_sn, p_spec,
                        n1kv_const.T2_PORT_NAME, self.t2_port_profile_id(),
                        t_p)
            except n_exc.NeutronException as e:
                LOG.error(_LE('Error %s when creating service VM resources. '
                              'Cleaning up.'), e)
                resources = {'ports': t_p, 'networks': t1_n + t2_n,
                             'subnets': t1_sn + t2_sn}
                self.delete_hosting_device_resources(
                    context, tenant_id, mgmt_port, **resources)
                mgmt_port = None
                t1_n, t1_sn, t2_n, t2_sn, t_p = [], [], [], [], []
        return {'mgmt_port': mgmt_port,
                'ports': t_p,
                'networks': t1_n + t2_n,
                'subnets': t1_sn + t2_sn}

    def _create_resources(self, context, type_name, resource_index,
                          n_spec, net_namebase, net_profile, t_n,
                          s_spec, subnet_namebase, t_sn,
                          p_spec, port_namebase, port_profile, t_p):
        index = str(resource_index + 1)
        # Create trunk network
        n_spec['network'].update({'name': net_namebase + index,
                                  'n1kv:profile_id': net_profile})
        t_n.append(self._core_plugin.create_network(context, n_spec))
        LOG.debug('Created %(t_n)s network with name %(name)s and id %(id)s',
                  {'t_n': type_name, 'name': n_spec['network']['name'],
                   'id': t_n[resource_index]['id']})
        # Create dummy subnet for the trunk network
        s_spec['subnet'].update({'name': subnet_namebase + index,
                                 'network_id': t_n[resource_index]['id']})
        t_sn.append(self._core_plugin.create_subnet(context, s_spec))
        # Create port on the trunk network
        p_spec['port'].update({'name': port_namebase + index,
                               'network_id': t_n[resource_index]['id'],
                               'n1kv:profile_id': port_profile})
        t_p.append(self._core_plugin.create_port(context, p_spec))
        LOG.debug('Created %(t_n)s port with name %(name)s, id %(id)s on '
                  'subnet %(subnet)s',
                  {'t_n': type_name, 'name': t_n[resource_index]['name'],
                   'id': t_n[resource_index]['id'],
                   'subnet': t_sn[resource_index]['id']})

    def get_hosting_device_resources(self, context, id, complementary_id,
                                     tenant_id, mgmt_nw_id):
        ports, nets, subnets = [], [], []
        mgmt_port = None
        # Ports for hosting device may not yet have 'device_id' set to
        # Nova assigned uuid of VM instance. However, those ports will still
        # have 'device_owner' attribute set to complementary_id. Hence, we
        # use both attributes in the query to ensure we find all ports.
        query = context.session.query(models_v2.Port)
        query = query.filter(expr.or_(
            models_v2.Port.device_id == id,
            models_v2.Port.device_owner == complementary_id))
        for port in query:
            if port['network_id'] != mgmt_nw_id:
                ports.append(port)
                nets.append({'id': port['network_id']})
                subnets.append({'id': port['fixed_ips'][0]['subnet_id']})
            else:
                mgmt_port = port
        return {'mgmt_port': mgmt_port,
                'ports': ports, 'networks': nets, 'subnets': subnets}

    def delete_hosting_device_resources(self, context, tenant_id, mgmt_port,
                                        **kwargs):
        attempts = 1
        port_ids = set(p['id'] for p in kwargs['ports'])
        subnet_ids = set(s['id'] for s in kwargs['subnets'])
        net_ids = set(n['id'] for n in kwargs['networks'])

        while mgmt_port is not None or port_ids or subnet_ids or net_ids:
            if attempts == DELETION_ATTEMPTS:
                LOG.warning(_LW('Aborting resource deletion after %d '
                                'unsuccessful attempts'), DELETION_ATTEMPTS)
                return
            else:
                if attempts > 1:
                    eventlet.sleep(SECONDS_BETWEEN_DELETION_ATTEMPTS)
                LOG.info(_LI('Resource deletion attempt %d starting'),
                         attempts)
            # Remove anything created.
            if mgmt_port is not None:
                ml = set([mgmt_port['id']])
                self._delete_resources(context, "management port",
                                       self._core_plugin.delete_port,
                                       n_exc.PortNotFound, ml)
                if not ml:
                    mgmt_port = None
            self._delete_resources(context, "trunk port",
                                   self._core_plugin.delete_port,
                                   n_exc.PortNotFound, port_ids)
            self._delete_resources(context, "subnet",
                                   self._core_plugin.delete_subnet,
                                   n_exc.SubnetNotFound, subnet_ids)
            self._delete_resources(context, "trunk network",
                                   self._core_plugin.delete_network,
                                   n_exc.NetworkNotFound, net_ids)
            attempts += 1
        LOG.info(_LI('Resource deletion succeeded'))

    def _delete_resources(self, context, name, deleter, exception_type,
                          resource_ids):
        for item_id in resource_ids.copy():
            try:
                deleter(context, item_id)
                resource_ids.remove(item_id)
            except exception_type:
                resource_ids.remove(item_id)
            except n_exc.NeutronException as e:
                LOG.error(_LE('Failed to delete %(resource_name)s %(net_id)s '
                              'for service vm due to %(err)s'),
                          {'resource_name': name, 'net_id': item_id, 'err': e})

    def setup_logical_port_connectivity(self, context, port_db):
        # Add the VLAN to the VLANs that the hosting port trunks.
        self._perform_logical_port_connectivity_action(
            context, port_db, 'Adding', n1kv.SEGMENT_ADD)

    def teardown_logical_port_connectivity(self, context, port_db):
        # Remove the VLAN from the VLANs that the hosting port trunks.
        self._perform_logical_port_connectivity_action(
            context, port_db, 'Removing', n1kv.SEGMENT_DEL)

    def extend_hosting_port_info(self, context, port_db, hosting_info):
        hosting_info['segmentation_id'] = port_db.hosting_info.segmentation_id

    def allocate_hosting_port(self, context, router_id, port_db, network_type,
                              hosting_device_id):
        allocations = self._get_router_ports_with_hosting_info_qry(
            context, router_id).all()
        trunk_mappings = {}
        if not allocations:
            # Router has no ports with hosting port allocated to them yet
            # whatsoever, so we select an unused port (that trunks networks
            # of correct type) on the hosting device.
            id_allocated_port = self._get_unused_service_vm_trunk_port(
                context, hosting_device_id, network_type)
        else:
            # Router has at least one port with hosting port allocated to it.
            # If there is only one allocated hosting port then it may be for
            # the wrong network type. Iterate to determine the hosting port.
            id_allocated_port = None
            for item in allocations:
                if item.hosting_info['network_type'] == network_type:
                    # For VXLAN we need to determine used link local tags.
                    # For VLAN we don't need to but the following lines will
                    # be performed once anyway since we break out of the
                    # loop later. That does not matter.
                    tag = item.hosting_info['segmentation_id']
                    trunk_mappings[item['network_id']] = tag
                    id_allocated_port = item.hosting_info['hosting_port_id']
                else:
                    port_twin_id = item.hosting_info['hosting_port_id']
                if network_type == 'vlan':
                    # For a router port belonging to a VLAN network we can
                    # break here since we now know (or have information to
                    # determine) hosting_port and the VLAN tag is provided by
                    # the core plugin.
                    break
            if id_allocated_port is None:
                # Router only had hosting port for wrong network
                # type allocated yet. So get that port's sibling.
                id_allocated_port = self._get_other_port_id_in_pair(
                    context, port_twin_id, hosting_device_id)
        if id_allocated_port is None:
            # Database must have been messed up if this happens ...
            LOG.debug('n1kv_trunking_driver: Could not allocate hosting port')
            return
        if network_type == 'vxlan':
            # For VXLAN we choose the (link local) VLAN tag
            used_tags = set(trunk_mappings.values())
            allocated_vlan = min(sorted(FULL_VLAN_SET - used_tags))
        else:
            # For VLAN the core plugin provides the VLAN tag.
            trunk_mappings[port_db['network_id']] = None
            tags = self._core_plugin.get_networks(
                context, {'id': [port_db['network_id']]},
                [pr_net.SEGMENTATION_ID])
            allocated_vlan = (None if tags == []
                              else tags[0].get(pr_net.SEGMENTATION_ID))
        if allocated_vlan is None:
            # Database must have been messed up if this happens ...
            LOG.debug('n1kv_trunking_driver: Could not allocate VLAN')
            return
        return {'allocated_port_id': id_allocated_port,
                'allocated_vlan': allocated_vlan}

    def _perform_logical_port_connectivity_action(self, context, port_db,
                                                  action_str, action):
        if (port_db is None or port_db.hosting_info is None or
                port_db.hosting_info.hosting_port is None):
            return
        np_id_t_nw = self._core_plugin.get_network(
            context, port_db.hosting_info.hosting_port['network_id'],
            [n1kv.PROFILE_ID])
        if np_id_t_nw.get(n1kv.PROFILE_ID) == self.t1_network_profile_id():
            # For a VXLAN trunked segment the id ends with
            # ':<link local VLAN tag>'
            trunk_spec = (port_db['network_id'] + ':' +
                          str(port_db.hosting_info.segmentation_id))
        else:
            trunk_spec = port_db['network_id']
        LOG.info(_LI('Updating trunk: %(action)s VLAN %(tag)d for network_id '
                     '%(id)s'), {'action': action,
                                 'tag': port_db.hosting_info.segmentation_id,
                                 'id': port_db['network_id']})
        # TODO(bobmel): enable statement below when N1kv does not trunk all
        if False:
            self._core_plugin.update_network(
                context, port_db.hosting_info.hosting_port['network_id'],
                {'network': {action: trunk_spec}})

    def _get_trunk_mappings(self, context, hosting_port_id):
        query = context.session.query(l3_models.HostedHostingPortBinding)
        query = query.filter(
            l3_models.HostedHostingPortBinding.hosting_port_id ==
            hosting_port_id)
        return dict((hhpb.logical_port['network_id'], hhpb.segmentation_id)
                    for hhpb in query)

    def _get_unused_service_vm_trunk_port(self, context, hd_id, network_type):
        name = (n1kv_const.T2_PORT_NAME if network_type == 'vlan'
                else n1kv_const.T1_PORT_NAME)
        attempts = 0
        while True:
            # mysql> SELECT * FROM ports WHERE device_id = 'hd_id1' AND
            # id NOT IN (SELECT hosting_port_id FROM hostedhostingportbindings)
            # AND
            # name LIKE '%t1%'
            # ORDER BY name;
            stmt = context.session.query(
                l3_models.HostedHostingPortBinding.hosting_port_id).subquery()
            query = context.session.query(models_v2.Port.id)
            query = query.filter(
                expr.and_(models_v2.Port.device_id == hd_id,
                          ~models_v2.Port.id.in_(stmt),
                          models_v2.Port.name.like('%' + name + '%')))
            query = query.order_by(models_v2.Port.name)
            res = query.first()
            if res is None:
                if attempts >= MAX_HOSTING_PORT_LOOKUP_ATTEMPTS:
                    # This should not happen ...
                    LOG.error(_LE('Hosting port DB inconsistency for '
                                  'hosting device %s'), hd_id)
                    return
                else:
                    # The service VM may not have plugged its VIF into the
                    # Neutron Port yet so we wait and make another lookup.
                    attempts += 1
                    LOG.info(_LI('Attempt %(attempt)d to find trunk ports for '
                                 'hosting device %(hd_id)s failed. Trying '
                                 'again in %(time)d seconds.'),
                             {'attempt': attempts, 'hd_id': hd_id,
                              'time': SECONDS_BETWEEN_HOSTING_PORT_LOOKSUPS})
                    eventlet.sleep(SECONDS_BETWEEN_HOSTING_PORT_LOOKSUPS)
            else:
                break
        return res[0]

    def _get_router_ports_with_hosting_info_qry(self, context, router_id,
                                                device_owner=None,
                                                hosting_port_id=None):
        # Query for a router's ports that have trunking information
        query = context.session.query(models_v2.Port)
        query = query.join(
            l3_models.HostedHostingPortBinding,
            models_v2.Port.id ==
            l3_models.HostedHostingPortBinding.logical_port_id)
        query = query.filter(models_v2.Port.device_id == router_id)
        if device_owner is not None:
            query = query.filter(models_v2.Port.device_owner == device_owner)
        if hosting_port_id is not None:
            query = query.filter(
                l3_models.HostedHostingPortBinding.hosting_port_id ==
                hosting_port_id)
        return query

    def _get_other_port_id_in_pair(self, context, port_id, hosting_device_id):
        query = context.session.query(models_v2.Port)
        query = query.filter(models_v2.Port.id == port_id)
        try:
            port = query.one()
            name, index = port['name'].split(':')
            name += ':'
            if name == n1kv_const.T1_PORT_NAME:
                other_port_name = n1kv_const.T2_PORT_NAME + index
            else:
                other_port_name = n1kv_const.T1_PORT_NAME + index
            query = context.session.query(models_v2.Port)
            query = query.filter(models_v2.Port.device_id == hosting_device_id,
                                 models_v2.Port.name == other_port_name)
            other_port = query.one()
            return other_port['id']
        except (exc.NoResultFound, exc.MultipleResultsFound):
            # This should not happen ...
            LOG.error(_LE('Port trunk pair DB inconsistency for port %s'),
                      port_id)
            return
@@ -1,46 +0,0 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


class DeviceCfgRpcCallbackMixin(object):
    """Mixin for Cisco cfg agent device reporting rpc support."""

    def report_non_responding_hosting_devices(self, context, host,
                                              hosting_device_ids):
        """Report that a hosting device cannot be contacted.

        @param: context - contains user information
        @param: host - originator of callback
        @param: hosting_device_ids - list of non-responding hosting devices
        @return: -
        """
        self._l3plugin.handle_non_responding_hosting_devices(
            context, host, hosting_device_ids)

    def register_for_duty(self, context, host):
        """Report that Cisco cfg agent is ready for duty.

        This function is supposed to be called when the agent has started,
        is ready to take on assignments and before any callbacks to fetch
        logical resources are issued.

        @param: context - contains user information
        @param: host - originator of callback
        @return: True if successfully registered, False if not successfully
                 registered, None if no handler found
                 If unsuccessful the agent should retry registration a few
                 seconds later
        """
        # schedule any non-handled hosting devices
        return self._l3plugin.auto_schedule_hosting_devices(context, host)
@@ -1,69 +0,0 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_serialization import jsonutils

from neutron.common import constants
from neutron.common import utils
from neutron import context as neutron_context
from neutron.extensions import portbindings
from neutron.openstack.common import log as logging

LOG = logging.getLogger(__name__)


class L3RouterCfgRpcCallbackMixin(object):
    """Mixin for Cisco cfg agent rpc support in L3 routing service plugin."""

    def cfg_sync_routers(self, context, host, router_ids=None,
                         hosting_device_ids=None):
        """Sync routers according to filters to a specific Cisco cfg agent.

        @param context: contains user information
        @param host - originator of callback
        @param router_ids - list of router ids to return information about
        @param hosting_device_ids - list of hosting device ids to get
               routers for.
        @return: a list of routers
                 with their hosting devices, interfaces and floating_ips
        """
        context = neutron_context.get_admin_context()
        try:
            routers = (
                self._l3plugin.list_active_sync_routers_on_hosting_devices(
                    context, host, router_ids, hosting_device_ids))
        except AttributeError:
            routers = []
        if routers and utils.is_extension_supported(
                self._core_plugin, constants.PORT_BINDING_EXT_ALIAS):
            self._ensure_host_set_on_ports(context, host, routers)
        LOG.debug('Routers returned to Cisco cfg agent@%(agt)s:\n %(routers)s',
                  {'agt': host, 'routers': jsonutils.dumps(routers, indent=5)})
        return routers

    def _ensure_host_set_on_ports(self, context, host, routers):
        for router in routers:
            LOG.debug('Checking router: %(id)s for host: %(host)s',
                      {'id': router['id'], 'host': host})
            self._ensure_host_set_on_port(context, host, router.get('gw_port'))
            for interface in router.get(constants.INTERFACE_KEY, []):
                self._ensure_host_set_on_port(context, host, interface)

    def _ensure_host_set_on_port(self, context, host, port):
        if (port and
            (port.get(portbindings.HOST_ID) != host or
             port.get(portbindings.VIF_TYPE) ==
             portbindings.VIF_TYPE_BINDING_FAILED)):
            self._core_plugin.update_port(
                context, port['id'], {'port': {portbindings.HOST_ID: host}})
@@ -1,95 +0,0 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import oslo_messaging

from neutron.common import rpc as n_rpc
from neutron.openstack.common import log as logging
from neutron.plugins.cisco.common import cisco_constants as c_constants

LOG = logging.getLogger(__name__)


class L3RouterJointAgentNotifyAPI(object):
    """API for plugin to notify Cisco cfg agent."""

    def __init__(self, l3plugin):
        self._l3plugin = l3plugin
        self.topic = c_constants.CFG_AGENT_L3_ROUTING
        target = oslo_messaging.Target(topic=self.topic, version='1.0')
        self.client = n_rpc.get_client(target)

    def _agent_notification(self, context, method, routers, operation, data):
        """Notify individual Cisco cfg agents."""
        admin_context = context if context.is_admin else context.elevated()
        for router in routers:
            if router['hosting_device'] is None:
                continue
            agents = self._l3plugin.get_cfg_agents_for_hosting_devices(
                admin_context, [router['hosting_device']['id']],
                admin_state_up=True, active=True, schedule=True)
            for agent in agents:
                LOG.debug('Notify %(agent_type)s at %(topic)s.%(host)s the '
                          'message %(method)s',
                          {'agent_type': agent.agent_type,
                           'topic': c_constants.CFG_AGENT_L3_ROUTING,
                           'host': agent.host,
                           'method': method})
                cctxt = self.client.prepare(server=agent.host)
                cctxt.cast(context, method, routers=[router['id']])

    def router_deleted(self, context, router):
        """Notifies agents about a deleted router."""
        self._agent_notification(context, 'router_deleted', [router],
                                 operation=None, data=None)

    def routers_updated(self, context, routers, operation=None, data=None):
        """Notifies agents about configuration changes to routers.

        This includes operations performed on the router like when a
        router interface is added or removed.
        """
        if routers:
            self._agent_notification(context, 'routers_updated', routers,
                                     operation, data)

    def hosting_devices_removed(self, context, hosting_data, deconfigure,
                                host):
        """Notify cfg agent that some hosting devices have been removed.

        This notification informs the cfg agent in <host> that the
        hosting devices in the <hosting_data> dictionary have been removed
        from the hosting device pool. The <hosting_data> dictionary also
        contains the ids of the affected logical resources for each hosting
        device:
            {'hd_id1': {'routers': [id1, id2, ...],
                        'fw': [id1, ...],
                        ...},
             'hd_id2': {'routers': [id3, id4, ...],
                        'fw': [id1, ...],
                        ...},
             ...}
        The <deconfigure> argument is True if any configurations for the
        logical resources should be removed from the hosting devices
        """
        if not hosting_data:
            return

        LOG.debug('Notify Cisco cfg agent at %(host)s the message '
                  'hosting_devices_removed', {'host': host})

        payload = {'hosting_data': hosting_data, 'deconfigure': deconfigure}
        cctxt = self.client.prepare(topic=c_constants.CFG_AGENT, server=host)
        cctxt.cast(context, 'hosting_devices_removed', payload=payload)
@@ -1,143 +0,0 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from novaclient import client
from novaclient import exceptions as nova_exc
from novaclient import utils as n_utils
from oslo_config import cfg

from neutron.i18n import _LE
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.cisco.common import cisco_constants as c_constants

LOG = logging.getLogger(__name__)
NOVA_API_VERSION = "2"


SERVICE_VM_LIB_OPTS = [
    cfg.StrOpt('templates_path',
               default='/opt/stack/data/neutron/cisco/templates',
               help=_("Path to templates for hosting devices.")),
    cfg.StrOpt('service_vm_config_path',
               default='/opt/stack/data/neutron/cisco/config_drive',
               help=_("Path to config drive files for service VM instances.")),
]

cfg.CONF.register_opts(SERVICE_VM_LIB_OPTS, "general")


class ServiceVMManager(object):

    def __init__(self, user=None, passwd=None, l3_admin_tenant=None,
                 auth_url=''):
        self._nclient = client.Client(NOVA_API_VERSION, user, passwd,
                                      l3_admin_tenant, auth_url,
                                      service_type="compute")

    @property
    def _core_plugin(self):
        return manager.NeutronManager.get_plugin()

    def nova_services_up(self):
        """Checks if required Nova services are up and running.

        returns: True if all needed Nova services are up, False otherwise
        """
        required = set(['nova-conductor', 'nova-cert', 'nova-scheduler',
                        'nova-compute', 'nova-consoleauth'])
        try:
            services = self._nclient.services.list()
        # There are several individual Nova client exceptions but they have
        # no other common base than Exception, hence the long list.
        except (nova_exc.UnsupportedVersion, nova_exc.CommandError,
                nova_exc.AuthorizationFailure, nova_exc.NoUniqueMatch,
                nova_exc.AuthSystemNotFound, nova_exc.NoTokenLookupException,
                nova_exc.EndpointNotFound, nova_exc.AmbiguousEndpoints,
                nova_exc.ConnectionRefused, nova_exc.ClientException,
                Exception) as e:
            LOG.error(_LE('Failure determining running Nova services: %s'), e)
            return False
        return not bool(required.difference(
            [service.binary for service in services
             if service.status == 'enabled' and service.state == 'up']))

    def get_service_vm_status(self, vm_id):
        try:
            status = self._nclient.servers.get(vm_id).status
        # There are several individual Nova client exceptions but they have
        # no other common base than Exception, hence the long list.
        except (nova_exc.UnsupportedVersion, nova_exc.CommandError,
                nova_exc.AuthorizationFailure, nova_exc.NoUniqueMatch,
                nova_exc.AuthSystemNotFound, nova_exc.NoTokenLookupException,
                nova_exc.EndpointNotFound, nova_exc.AmbiguousEndpoints,
                nova_exc.ConnectionRefused, nova_exc.ClientException,
                Exception) as e:
            LOG.error(_LE('Failed to get status of service VM instance '
                          '%(id)s, due to %(err)s'), {'id': vm_id, 'err': e})
            status = c_constants.SVM_ERROR
        return status

    def dispatch_service_vm(self, context, instance_name, vm_image,
                            vm_flavor, hosting_device_drv, mgmt_port,
                            ports=None):
        nics = [{'port-id': mgmt_port['id']}]
        for port in ports:
            nics.append({'port-id': port['id']})

        try:
            image = n_utils.find_resource(self._nclient.images, vm_image)
            flavor = n_utils.find_resource(self._nclient.flavors, vm_flavor)
        except (nova_exc.CommandError, Exception) as e:
            LOG.error(_LE('Failure finding needed Nova resource: %s'), e)
            return

        try:
            # Assumption for now is that this does not need to be
            # plugin dependent, only hosting device type dependent.
            files = hosting_device_drv.create_config(context, mgmt_port)
        except IOError:
            return

        try:
            server = self._nclient.servers.create(
                instance_name, image.id, flavor.id, nics=nics, files=files,
                config_drive=(files != {}))
        # There are several individual Nova client exceptions but they have
        # no other common base than Exception, therefore the long list.
        except (nova_exc.UnsupportedVersion, nova_exc.CommandError,
                nova_exc.AuthorizationFailure, nova_exc.NoUniqueMatch,
                nova_exc.AuthSystemNotFound, nova_exc.NoTokenLookupException,
                nova_exc.EndpointNotFound, nova_exc.AmbiguousEndpoints,
                nova_exc.ConnectionRefused, nova_exc.ClientException,
                Exception) as e:
            LOG.error(_LE('Failed to create service VM instance: %s'), e)
            return
        return {'id': server.id}

    def delete_service_vm(self, context, vm_id):
        try:
            self._nclient.servers.delete(vm_id)
            return True
        # There are several individual Nova client exceptions but they have
        # no other common base than Exception, therefore the long list.
        except (nova_exc.UnsupportedVersion, nova_exc.CommandError,
                nova_exc.AuthorizationFailure, nova_exc.NoUniqueMatch,
                nova_exc.AuthSystemNotFound, nova_exc.NoTokenLookupException,
                nova_exc.EndpointNotFound, nova_exc.AmbiguousEndpoints,
                nova_exc.ConnectionRefused, nova_exc.ClientException,
                Exception) as e:
            LOG.error(_LE('Failed to delete service VM instance %(id)s, '
                          'due to %(err)s'), {'id': vm_id, 'err': e})
            return False
@@ -1,4 +1,4 @@
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -12,78 +12,13 @@
# License for the specific language governing permissions and limitations
# under the License.

import oslo_messaging

from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.db import agents_db
from neutron.db import common_db_mixin
from neutron import manager
from neutron.plugins.cisco.db.l3 import device_handling_db
from neutron.plugins.cisco.db.l3 import l3_router_appliance_db
from neutron.plugins.cisco.l3.rpc import (l3_router_cfgagent_rpc_cb as
    l3_router_rpc)
from neutron.plugins.cisco.l3.rpc import devices_cfgagent_rpc_cb as devices_rpc
from neutron.plugins.common import constants
from networking_cisco.plugins.cisco.service_plugins import cisco_router_plugin


class CiscoRouterPluginRpcCallbacks(l3_router_rpc.L3RouterCfgRpcCallbackMixin,
                                    devices_rpc.DeviceCfgRpcCallbackMixin):

    target = oslo_messaging.Target(version='1.1')

    def __init__(self, l3plugin):
        super(CiscoRouterPluginRpcCallbacks, self).__init__()
        self._l3plugin = l3plugin

    @property
    def _core_plugin(self):
        return manager.NeutronManager.get_plugin()
class CiscoRouterPluginRpcCallbacks(
        cisco_router_plugin.CiscoRouterPluginRpcCallbacks):
    pass


class CiscoRouterPlugin(common_db_mixin.CommonDbMixin,
                        agents_db.AgentDbMixin,
                        l3_router_appliance_db.L3RouterApplianceDBMixin,
                        device_handling_db.DeviceHandlingMixin):

    """Implementation of Cisco L3 Router Service Plugin for Neutron.

    This class implements a L3 service plugin that provides
    router and floatingip resources and manages associated
    request/response.
    All DB functionality is implemented in class
    l3_router_appliance_db.L3RouterApplianceDBMixin.
    """
    supported_extension_aliases = ["router", "extraroute"]

    def __init__(self):
        self.setup_rpc()
        # for backlogging of non-scheduled routers
        self._setup_backlog_handling()
        self._setup_device_handling()

    def setup_rpc(self):
        # RPC support
        self.topic = topics.L3PLUGIN
        self.conn = n_rpc.create_connection(new=True)
        self.endpoints = [CiscoRouterPluginRpcCallbacks(self)]
        self.conn.create_consumer(self.topic, self.endpoints,
                                  fanout=False)
        # Consume from all consumers in threads
        self.conn.consume_in_threads()

    def get_plugin_type(self):
        return constants.L3_ROUTER_NAT

    def get_plugin_description(self):
        return ("Cisco Router Service Plugin for basic L3 forwarding"
                " between (L2) Neutron networks and access to external"
                " networks via a NAT gateway.")

    @property
    def _core_plugin(self):
        try:
            return self._plugin
        except AttributeError:
            self._plugin = manager.NeutronManager.get_plugin()
            return self._plugin
class CiscoRouterPlugin(cisco_router_plugin.CiscoRouterPlugin):
    pass
1 neutron/plugins/cisco/service_plugins/requirements.txt Normal file
@@ -0,0 +1 @@
networking-cisco
@@ -1,136 +0,0 @@
|
||||
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
import testtools
|
||||
|
||||
from neutron.agent.common import config
|
||||
from neutron.common import config as base_config
|
||||
from neutron.common import constants as l3_constants
|
||||
from neutron.openstack.common import uuidutils
|
||||
from neutron.plugins.cisco.cfg_agent import cfg_agent
|
||||
from neutron.tests import base
|
||||
|
||||
_uuid = uuidutils.generate_uuid
|
||||
HOSTNAME = 'myhost'
|
||||
FAKE_ID = _uuid()
|
||||
|
||||
|
||||
def prepare_router_data(enable_snat=None, num_internal_ports=1):
|
||||
router_id = _uuid()
|
||||
ex_gw_port = {'id': _uuid(),
|
||||
'network_id': _uuid(),
|
||||
'fixed_ips': [{'ip_address': '19.4.4.4',
|
||||
'subnet_id': _uuid()}],
|
||||
'subnet': {'cidr': '19.4.4.0/24',
|
||||
'gateway_ip': '19.4.4.1'}}
|
||||
int_ports = []
|
||||
for i in range(num_internal_ports):
|
||||
int_ports.append({'id': _uuid(),
|
||||
'network_id': _uuid(),
|
||||
'admin_state_up': True,
|
||||
'fixed_ips': [{'ip_address': '35.4.%s.4' % i,
|
||||
'subnet_id': _uuid()}],
|
||||
'mac_address': 'ca:fe:de:ad:be:ef',
|
||||
'subnet': {'cidr': '35.4.%s.0/24' % i,
|
||||
'gateway_ip': '35.4.%s.1' % i}})
|
||||
hosting_device = {'id': _uuid(),
|
||||
'host_type': 'CSR1kv',
|
||||
'ip_address': '20.0.0.5',
|
||||
'port': '23'}
|
||||
|
||||
router = {
|
||||
'id': router_id,
|
||||
l3_constants.INTERFACE_KEY: int_ports,
|
||||
'routes': [],
|
||||
'gw_port': ex_gw_port,
|
||||
'hosting_device': hosting_device}
|
||||
if enable_snat is not None:
|
||||
router['enable_snat'] = enable_snat
|
||||
return router, int_ports
|
||||
|
||||
|
||||
class TestCiscoCfgAgentWIthStateReporting(base.BaseTestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.conf = cfg.ConfigOpts()
|
||||
config.register_agent_state_opts_helper(cfg.CONF)
|
||||
self.conf.register_opts(base_config.core_opts)
|
||||
self.conf.register_opts(cfg_agent.CiscoCfgAgent.OPTS, "cfg_agent")
|
||||
cfg.CONF.set_override('report_interval', 0, 'AGENT')
|
||||
super(TestCiscoCfgAgentWIthStateReporting, self).setUp()
|
||||
self.devmgr_plugin_api_cls_p = mock.patch(
|
||||
'neutron.plugins.cisco.cfg_agent.cfg_agent.'
|
||||
'CiscoDeviceManagementApi')
|
||||
devmgr_plugin_api_cls = self.devmgr_plugin_api_cls_p.start()
|
||||
self.devmgr_plugin_api = mock.Mock()
|
||||
devmgr_plugin_api_cls.return_value = self.devmgr_plugin_api
|
||||
self.devmgr_plugin_api.register_for_duty.return_value = True
|
||||
|
||||
self.plugin_reportstate_api_cls_p = mock.patch(
|
||||
'neutron.agent.rpc.PluginReportStateAPI')
|
||||
plugin_reportstate_api_cls = self.plugin_reportstate_api_cls_p.start()
|
||||
self.plugin_reportstate_api = mock.Mock()
|
||||
plugin_reportstate_api_cls.return_value = self.plugin_reportstate_api
|
||||
|
||||
self.looping_call_p = mock.patch(
|
||||
'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall')
|
||||
self.looping_call_p.start()
|
||||
|
||||
mock.patch('neutron.common.rpc.create_connection').start()
|
||||
|
||||
def test_agent_registration_success(self):
|
||||
agent = cfg_agent.CiscoCfgAgentWithStateReport(HOSTNAME, self.conf)
|
||||
self.assertTrue(agent.devmgr_rpc.register_for_duty(agent.context))
|
||||
|
||||
def test_agent_registration_success_after_2_tries(self):
|
||||
self.devmgr_plugin_api.register_for_duty = mock.Mock(
|
||||
side_effect=[False, False, True])
|
||||
cfg_agent.REGISTRATION_RETRY_DELAY = 0.01
|
||||
agent = cfg_agent.CiscoCfgAgentWithStateReport(HOSTNAME, self.conf)
|
||||
self.assertEqual(agent.devmgr_rpc.register_for_duty.call_count, 3)
|
||||
|
||||
def test_agent_registration_fail_always(self):
|
||||
self.devmgr_plugin_api.register_for_duty = mock.Mock(
|
||||
return_value=False)
|
||||
cfg_agent.REGISTRATION_RETRY_DELAY = 0.01
|
||||
cfg_agent.MAX_REGISTRATION_ATTEMPTS = 3
|
||||
with testtools.ExpectedException(SystemExit):
|
||||
cfg_agent.CiscoCfgAgentWithStateReport(HOSTNAME, self.conf)
|
||||
|
||||
def test_agent_registration_no_device_mgr(self):
|
||||
self.devmgr_plugin_api.register_for_duty = mock.Mock(
|
||||
return_value=None)
|
||||
cfg_agent.REGISTRATION_RETRY_DELAY = 0.01
|
||||
cfg_agent.MAX_REGISTRATION_ATTEMPTS = 3
|
||||
with testtools.ExpectedException(SystemExit):
|
||||
cfg_agent.CiscoCfgAgentWithStateReport(HOSTNAME, self.conf)
|
||||
|
||||
def test_report_state(self):
|
||||
agent = cfg_agent.CiscoCfgAgentWithStateReport(HOSTNAME, self.conf)
|
||||
agent._report_state()
|
||||
self.assertIn('total routers', agent.agent_state['configurations'])
|
||||
self.assertEqual(0, agent.agent_state[
|
||||
'configurations']['total routers'])
|
||||
|
||||
@mock.patch('neutron.plugins.cisco.cfg_agent.'
|
||||
'cfg_agent.CiscoCfgAgentWithStateReport._agent_registration')
|
||||
def test_report_state_attribute_error(self, agent_registration):
|
||||
cfg.CONF.set_override('report_interval', 1, 'AGENT')
|
||||
self.plugin_reportstate_api.report_state.side_effect = AttributeError
|
||||
agent = cfg_agent.CiscoCfgAgentWithStateReport(HOSTNAME, self.conf)
|
||||
agent.heartbeat = mock.Mock()
|
||||
agent.send_agent_report(None, None)
|
||||
self.assertTrue(agent.heartbeat.stop.called)
|
@@ -1,282 +0,0 @@
|
||||
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import sys
|
||||
|
||||
import mock
|
||||
import netaddr
|
||||
|
||||
from neutron.common import constants as l3_constants
|
||||
from neutron.openstack.common import uuidutils
|
||||
from neutron.tests import base
|
||||
|
||||
from neutron.plugins.cisco.cfg_agent.device_drivers.csr1kv import (
|
||||
cisco_csr1kv_snippets as snippets)
|
||||
sys.modules['ncclient'] = mock.MagicMock()
|
||||
sys.modules['ciscoconfparse'] = mock.MagicMock()
|
||||
from neutron.plugins.cisco.cfg_agent.device_drivers.csr1kv import (
|
||||
csr1kv_routing_driver as csr_driver)
|
||||
from neutron.plugins.cisco.cfg_agent.service_helpers import routing_svc_helper
|
||||
|
||||
_uuid = uuidutils.generate_uuid
|
||||
FAKE_ID = _uuid()
|
||||
PORT_ID = _uuid()
|
||||
|
||||
|
||||
class TestCSR1kvRouting(base.BaseTestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestCSR1kvRouting, self).setUp()
|
||||
|
||||
device_params = {'management_ip_address': 'fake_ip',
|
||||
'protocol_port': 22,
|
||||
'credentials': {"username": "stack",
|
||||
"password": "cisco"},
|
||||
}
|
||||
self.driver = csr_driver.CSR1kvRoutingDriver(
|
||||
**device_params)
|
||||
self.mock_conn = mock.MagicMock()
|
||||
self.driver._csr_conn = self.mock_conn
|
||||
self.driver._check_response = mock.MagicMock(return_value=True)
|
||||
|
||||
self.vrf = ('nrouter-' + FAKE_ID)[:csr_driver.CSR1kvRoutingDriver.
|
||||
DEV_NAME_LEN]
|
||||
self.driver._get_vrfs = mock.Mock(return_value=[self.vrf])
|
||||
self.ex_gw_ip = '20.0.0.30'
|
||||
self.ex_gw_cidr = '20.0.0.30/24'
|
||||
self.ex_gw_vlan = 1000
|
||||
self.ex_gw_gateway_ip = '20.0.0.1'
|
||||
self.ex_gw_port = {'id': _uuid(),
|
||||
'network_id': _uuid(),
|
||||
'fixed_ips': [{'ip_address': self.ex_gw_ip,
|
||||
'subnet_id': _uuid()}],
|
||||
'subnet': {'cidr': self.ex_gw_cidr,
|
||||
'gateway_ip': self.ex_gw_gateway_ip},
|
||||
'ip_cidr': self.ex_gw_cidr,
|
||||
'mac_address': 'ca:fe:de:ad:be:ef',
|
||||
'hosting_info': {'segmentation_id': self.ex_gw_vlan,
|
||||
'hosting_port_name': 't2_p:0'}}
|
||||
self.vlan_no = 500
|
||||
self.gw_ip_cidr = '10.0.0.1/16'
|
||||
self.gw_ip = '10.0.0.1'
|
||||
self.hosting_port = 't1_p:0'
|
||||
self.port = {'id': PORT_ID,
|
||||
'ip_cidr': self.gw_ip_cidr,
|
||||
'fixed_ips': [{'ip_address': self.gw_ip}],
|
||||
'hosting_info': {'segmentation_id': self.vlan_no,
|
||||
'hosting_port_name': self.hosting_port}}
|
||||
int_ports = [self.port]
|
||||
|
||||
self.router = {
|
||||
'id': FAKE_ID,
|
||||
l3_constants.INTERFACE_KEY: int_ports,
|
||||
'enable_snat': True,
|
||||
'routes': [],
|
||||
'gw_port': self.ex_gw_port}
|
||||
|
||||
self.ri = routing_svc_helper.RouterInfo(FAKE_ID, self.router)
|
||||
self.ri.internal_ports = int_ports
|
||||
|
||||
def test_csr_get_vrf_name(self):
|
||||
self.assertEqual(self.driver._csr_get_vrf_name(self.ri), self.vrf)
|
||||
|
||||
def test_create_vrf(self):
|
||||
confstr = snippets.CREATE_VRF % self.vrf
|
||||
|
||||
self.driver._create_vrf(self.vrf)
|
||||
|
||||
self.assertTrue(self.driver._csr_conn.edit_config.called)
|
||||
self.driver._csr_conn.edit_config.assert_called_with(target='running',
|
||||
config=confstr)
|
||||
|
||||
def test_remove_vrf(self):
|
||||
confstr = snippets.REMOVE_VRF % self.vrf
|
||||
|
||||
self.driver._remove_vrf(self.vrf)
|
||||
|
||||
self.assertTrue(self.driver._csr_conn.edit_config.called)
|
||||
self.driver._csr_conn.edit_config.assert_called_with(target='running',
|
||||
config=confstr)
|
||||
|
||||
def test_router_added(self):
|
||||
confstr = snippets.CREATE_VRF % self.vrf
|
||||
|
||||
self.driver.router_added(self.ri)
|
||||
|
||||
self.assertTrue(self.driver._csr_conn.edit_config.called)
|
||||
self.driver._csr_conn.edit_config.assert_called_with(target='running',
|
||||
config=confstr)
|
||||
|
||||
def test_router_removed(self):
|
||||
confstr = snippets.REMOVE_VRF % self.vrf
|
||||
|
||||
self.driver._remove_vrf(self.vrf)
|
||||
|
||||
self.assertTrue(self.driver._csr_conn.edit_config.called)
|
||||
self.driver._csr_conn.edit_config.assert_called_once_with(
|
||||
target='running', config=confstr)
|
||||
|
||||
def test_internal_network_added(self):
|
||||
self.driver._create_subinterface = mock.MagicMock()
|
||||
interface = 'GigabitEthernet0' + '.' + str(self.vlan_no)
|
||||
|
||||
self.driver.internal_network_added(self.ri, self.port)
|
||||
|
||||
args = (interface, self.vlan_no, self.vrf, self.gw_ip,
|
||||
netaddr.IPAddress('255.255.0.0'))
|
||||
self.driver._create_subinterface.assert_called_once_with(*args)
|
||||
|
||||
def test_internal_network_removed(self):
|
||||
self.driver._remove_subinterface = mock.MagicMock()
|
||||
interface = 'GigabitEthernet0' + '.' + str(self.vlan_no)
|
||||
|
||||
self.driver.internal_network_removed(self.ri, self.port)
|
||||
|
||||
self.driver._remove_subinterface.assert_called_once_with(interface)
|
||||
|
||||
def test_routes_updated(self):
|
||||
dest_net = '20.0.0.0/16'
|
||||
next_hop = '10.0.0.255'
|
||||
route = {'destination': dest_net,
|
||||
'nexthop': next_hop}
|
||||
|
||||
dest = netaddr.IPAddress('20.0.0.0')
|
||||
destmask = netaddr.IPNetwork(dest_net).netmask
|
||||
self.driver._add_static_route = mock.MagicMock()
|
||||
self.driver._remove_static_route = mock.MagicMock()
|
||||
|
||||
self.driver.routes_updated(self.ri, 'replace', route)
|
||||
self.driver._add_static_route.assert_called_once_with(
|
||||
dest, destmask, next_hop, self.vrf)
|
||||
|
||||
self.driver.routes_updated(self.ri, 'delete', route)
|
||||
self.driver._remove_static_route.assert_called_once_with(
|
||||
dest, destmask, next_hop, self.vrf)
|
||||
|
||||
def test_floatingip(self):
|
||||
floating_ip = '15.1.2.3'
|
||||
fixed_ip = '10.0.0.3'
|
||||
|
||||
self.driver._add_floating_ip = mock.MagicMock()
|
||||
self.driver._remove_floating_ip = mock.MagicMock()
|
||||
self.driver._add_interface_nat = mock.MagicMock()
|
||||
self.driver._remove_dyn_nat_translations = mock.MagicMock()
|
||||
self.driver._remove_interface_nat = mock.MagicMock()
|
||||
|
||||
self.driver.floating_ip_added(self.ri, self.ex_gw_port,
|
||||
floating_ip, fixed_ip)
|
||||
self.driver._add_floating_ip.assert_called_once_with(
|
||||
floating_ip, fixed_ip, self.vrf)
|
||||
|
||||
self.driver.floating_ip_removed(self.ri, self.ex_gw_port,
|
||||
floating_ip, fixed_ip)
|
||||
|
||||
self.driver._remove_interface_nat.assert_called_once_with(
|
||||
'GigabitEthernet1.1000', 'outside')
|
||||
self.driver._remove_dyn_nat_translations.assert_called_once_with()
|
||||
self.driver._remove_floating_ip.assert_called_once_with(
|
||||
floating_ip, fixed_ip, self.vrf)
|
||||
self.driver._add_interface_nat.assert_called_once_with(
|
||||
'GigabitEthernet1.1000', 'outside')
|
||||
|
||||
def test_external_gateway_added(self):
|
||||
self.driver._create_subinterface = mock.MagicMock()
|
||||
self.driver._add_default_static_route = mock.MagicMock()
|
||||
|
||||
ext_interface = 'GigabitEthernet1' + '.' + str(1000)
|
||||
args = (ext_interface, self.ex_gw_vlan, self.vrf, self.ex_gw_ip,
|
||||
netaddr.IPAddress('255.255.255.0'))
|
||||
|
||||
self.driver.external_gateway_added(self.ri, self.ex_gw_port)
|
||||
|
||||
self.driver._create_subinterface.assert_called_once_with(*args)
|
||||
self.driver._add_default_static_route.assert_called_once_with(
|
||||
self.ex_gw_gateway_ip, self.vrf)
|
||||
|
||||
def test_enable_internal_network_NAT(self):
|
||||
self.driver._nat_rules_for_internet_access = mock.MagicMock()
|
||||
int_interface = ('GigabitEthernet0' + '.' + str(self.vlan_no))
|
||||
ext_interface = 'GigabitEthernet1' + '.' + str(1000)
|
||||
args = (('acl_' + str(self.vlan_no)),
|
||||
netaddr.IPNetwork(self.gw_ip_cidr).network,
|
||||
netaddr.IPNetwork(self.gw_ip_cidr).hostmask,
|
||||
int_interface,
|
||||
ext_interface,
|
||||
self.vrf)
|
||||
|
||||
self.driver.enable_internal_network_NAT(self.ri, self.port,
|
||||
self.ex_gw_port)
|
||||
|
||||
self.driver._nat_rules_for_internet_access.assert_called_once_with(
|
||||
*args)
|
||||
|
||||
def test_enable_internal_network_NAT_with_confstring(self):
|
||||
self.driver._csr_conn.reset_mock()
|
||||
self.driver._check_acl = mock.Mock(return_value=False)
|
||||
int_interface = ('GigabitEthernet0' + '.' + str(self.vlan_no))
|
||||
ext_interface = 'GigabitEthernet1' + '.' + str(1000)
|
||||
acl_no = ('acl_' + str(self.vlan_no))
|
||||
int_network = netaddr.IPNetwork(self.gw_ip_cidr).network
|
||||
int_net_mask = netaddr.IPNetwork(self.gw_ip_cidr).hostmask
|
||||
|
||||
self.driver.enable_internal_network_NAT(self.ri, self.port,
|
||||
self.ex_gw_port)
|
||||
|
||||
self.assert_edit_running_config(
|
||||
snippets.CREATE_ACL, (acl_no, int_network, int_net_mask))
|
||||
self.assert_edit_running_config(
|
||||
snippets.SET_DYN_SRC_TRL_INTFC, (acl_no, ext_interface, self.vrf))
|
||||
self.assert_edit_running_config(
|
||||
snippets.SET_NAT, (int_interface, 'inside'))
|
||||
self.assert_edit_running_config(
|
||||
snippets.SET_NAT, (ext_interface, 'outside'))
|
||||
|
||||
def test_disable_internal_network_NAT(self):
|
||||
self.driver._remove_interface_nat = mock.MagicMock()
|
||||
self.driver._remove_dyn_nat_translations = mock.MagicMock()
|
||||
self.driver._remove_dyn_nat_rule = mock.MagicMock()
|
||||
int_interface = ('GigabitEthernet0' + '.' + str(self.vlan_no))
|
||||
ext_interface = 'GigabitEthernet1' + '.' + str(1000)
|
||||
self.driver.disable_internal_network_NAT(self.ri, self.port,
|
||||
self.ex_gw_port)
|
||||
args = (('acl_' + str(self.vlan_no)), ext_interface, self.vrf)
|
||||
|
||||
self.driver._remove_interface_nat.assert_called_once_with(
|
||||
int_interface, 'inside')
|
||||
self.driver._remove_dyn_nat_translations.assert_called_once_with()
|
||||
self.driver._remove_dyn_nat_rule.assert_called_once_with(*args)
|
||||
|
||||
def assert_edit_running_config(self, snippet_name, args):
|
||||
if args:
|
||||
confstr = snippet_name % args
|
||||
else:
|
||||
confstr = snippet_name
|
||||
self.driver._csr_conn.edit_config.assert_any_call(
|
||||
target='running', config=confstr)
|
||||
|
||||
def test_disable_internal_network_NAT_with_confstring(self):
|
||||
self.driver._cfg_exists = mock.Mock(return_value=True)
|
||||
int_interface = ('GigabitEthernet0' + '.' + str(self.vlan_no))
|
||||
ext_interface = 'GigabitEthernet1' + '.' + str(1000)
|
||||
acl_no = 'acl_' + str(self.vlan_no)
|
||||
self.driver.disable_internal_network_NAT(self.ri, self.port,
|
||||
self.ex_gw_port)
|
||||
|
||||
self.assert_edit_running_config(
|
||||
snippets.REMOVE_NAT, (int_interface, 'inside'))
|
||||
self.assert_edit_running_config(snippets.CLEAR_DYN_NAT_TRANS, None)
|
||||
self.assert_edit_running_config(
|
||||
snippets.REMOVE_DYN_SRC_TRL_INTFC, (acl_no, ext_interface,
|
||||
self.vrf))
|
||||
self.assert_edit_running_config(snippets.REMOVE_ACL, acl_no)
|
@@ -1,189 +0,0 @@
|
||||
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import sys
|
||||
|
||||
import datetime
|
||||
import mock
|
||||
|
||||
from neutron.openstack.common import uuidutils
|
||||
|
||||
sys.modules['ncclient'] = mock.MagicMock()
|
||||
sys.modules['ciscoconfparse'] = mock.MagicMock()
|
||||
from neutron.plugins.cisco.cfg_agent import device_status
|
||||
from neutron.tests import base
|
||||
|
||||
_uuid = uuidutils.generate_uuid
|
||||
|
||||
TYPE_STRING = 'string'
|
||||
TYPE_DATETIME = 'datetime'
|
||||
NOW = 0
|
||||
BOOT_TIME = 420
|
||||
DEAD_TIME = 300
|
||||
BELOW_BOOT_TIME = 100
|
||||
|
||||
|
||||
def create_timestamp(seconds_from_now, type=TYPE_STRING):
|
||||
timedelta = datetime.timedelta(seconds=seconds_from_now)
|
||||
past_time = datetime.datetime.utcnow() - timedelta
|
||||
if type is TYPE_STRING:
|
||||
return past_time.strftime("%Y-%m-%dT%H:%M:%S.%f")
|
||||
if type is TYPE_DATETIME:
|
||||
return past_time
|
||||
|
||||
|
||||
class TestHostingDevice(base.BaseTestCase):

    def setUp(self):
        super(TestHostingDevice, self).setUp()
        self.status = device_status.DeviceStatus()
        device_status._is_pingable = mock.MagicMock(return_value=True)

        self.hosting_device = {'id': 123,
                               'host_type': 'CSR1kv',
                               'management_ip_address': '10.0.0.1',
                               'port': '22',
                               'booting_time': 420}
        self.created_at_str = datetime.datetime.utcnow().strftime(
            "%Y-%m-%d %H:%M:%S")
        self.hosting_device['created_at'] = self.created_at_str
        self.router_id = _uuid()
        self.router = {'id': self.router_id,
                       'hosting_device': self.hosting_device}

    def test_hosting_devices_object(self):
        self.assertEqual({}, self.status.backlog_hosting_devices)

    def test_is_hosting_device_reachable_positive(self):
        self.assertTrue(self.status.is_hosting_device_reachable(
            self.hosting_device))

    def test_is_hosting_device_reachable_negative(self):
        self.assertEqual(0, len(self.status.backlog_hosting_devices))
        self.hosting_device['created_at'] = self.created_at_str  # Back to str
        device_status._is_pingable.return_value = False

        self.assertFalse(device_status._is_pingable('1.2.3.4'))
        self.assertIsNone(self.status.is_hosting_device_reachable(
            self.hosting_device))
        self.assertEqual(1, len(self.status.get_backlogged_hosting_devices()))
        self.assertTrue(123 in self.status.get_backlogged_hosting_devices())
        self.assertEqual(self.status.backlog_hosting_devices[123]['hd'],
                         self.hosting_device)

    def test_is_hosting_device_reachable_negative_existing_hd(self):
        self.status.backlog_hosting_devices.clear()
        self.status.backlog_hosting_devices[123] = {'hd': self.hosting_device}

        self.assertEqual(1, len(self.status.backlog_hosting_devices))
        self.assertIsNone(self.status.is_hosting_device_reachable(
            self.hosting_device))
        self.assertEqual(1, len(self.status.get_backlogged_hosting_devices()))
        self.assertTrue(123 in self.status.backlog_hosting_devices.keys())
        self.assertEqual(self.status.backlog_hosting_devices[123]['hd'],
                         self.hosting_device)

    def test_check_backlog_empty(self):

        expected = {'reachable': [],
                    'dead': []}

        self.assertEqual(expected,
                         self.status.check_backlogged_hosting_devices())
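
    # The backlog tests below rely on two conventions visible in the
    # assertions: each backlog entry has the shape
    # {'hd': <hosting_device dict>, 'routers': [<router ids>]}, and
    # check_backlogged_hosting_devices() returns
    # {'reachable': [<hd ids>], 'dead': [<hd ids>]}.
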
    def test_check_backlog_below_booting_time(self):
        expected = {'reachable': [],
                    'dead': []}

        self.hosting_device['created_at'] = create_timestamp(NOW)
        hd = self.hosting_device
        hd_id = hd['id']
        self.status.backlog_hosting_devices[hd_id] = {'hd': hd,
                                                      'routers': [
                                                          self.router_id]
                                                      }

        self.assertEqual(expected,
                         self.status.check_backlogged_hosting_devices())

        # Simulate 20 seconds before boot time finishes
        self.hosting_device['created_at'] = create_timestamp(BOOT_TIME - 20)
        self.assertEqual(self.status.check_backlogged_hosting_devices(),
                         expected)

        # Simulate 1 second before boot time
        self.hosting_device['created_at'] = create_timestamp(BOOT_TIME - 1)
        self.assertEqual(self.status.check_backlogged_hosting_devices(),
                         expected)

    def test_check_backlog_above_booting_time_pingable(self):
        """Test for backlog processing after booting.

        Simulates a hosting device that has passed its booting time and
        is now pingable.
        """
        self.hosting_device['created_at'] = create_timestamp(BOOT_TIME + 10)
        hd = self.hosting_device
        hd_id = hd['id']
        device_status._is_pingable.return_value = True
        self.status.backlog_hosting_devices[hd_id] = {'hd': hd,
                                                      'routers': [
                                                          self.router_id]}
        expected = {'reachable': [hd_id],
                    'dead': []}
        self.assertEqual(expected,
                         self.status.check_backlogged_hosting_devices())

    def test_check_backlog_above_BT_not_pingable_below_deadtime(self):
        """Test for backlog processing inside the dead-time interval.

        Simulates a hosting device that has passed its booting time but
        has been backlogged for less than the 'declared dead' interval.
        The hosting device is still not pingable.
        """
        hd = self.hosting_device
        hd['created_at'] = create_timestamp(BOOT_TIME + 10)
        # Inserted in backlog now
        hd['backlog_insertion_ts'] = create_timestamp(NOW, type=TYPE_DATETIME)
        hd_id = hd['id']
        device_status._is_pingable.return_value = False
        self.status.backlog_hosting_devices[hd_id] = {'hd': hd,
                                                      'routers': [
                                                          self.router_id]}
        expected = {'reachable': [],
                    'dead': []}
        self.assertEqual(expected,
                         self.status.check_backlogged_hosting_devices())

    def test_check_backlog_above_BT_not_pingable_aboveDeadTime(self):
        """Test for backlog processing after the dead-time interval.

        Simulates a hosting device that has passed its booting time and
        has been backlogged for longer than the 'declared dead' interval.
        The hosting device is still not pingable, so it is reported dead.
        """
        hd = self.hosting_device
        hd['created_at'] = create_timestamp(BOOT_TIME + DEAD_TIME + 10)
        # Inserted in backlog 5 seconds after booting time
        hd['backlog_insertion_ts'] = create_timestamp(BOOT_TIME + 5,
                                                      type=TYPE_DATETIME)

        hd_id = hd['id']
        device_status._is_pingable.return_value = False
        self.status.backlog_hosting_devices[hd_id] = {'hd': hd,
                                                      'routers': [
                                                          self.router_id]}
        expected = {'reachable': [],
                    'dead': [hd_id]}
        self.assertEqual(expected,
                         self.status.check_backlogged_hosting_devices())
@ -1,651 +0,0 @@
|
||||
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import copy
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
import oslo_messaging
|
||||
|
||||
from neutron.common import config as base_config
|
||||
from neutron.common import constants as l3_constants
|
||||
from neutron.openstack.common import uuidutils
|
||||
from neutron.plugins.cisco.cfg_agent import cfg_agent
|
||||
from neutron.plugins.cisco.cfg_agent import cfg_exceptions
|
||||
from neutron.plugins.cisco.cfg_agent.service_helpers.routing_svc_helper import(
|
||||
RouterInfo)
|
||||
from neutron.plugins.cisco.cfg_agent.service_helpers.routing_svc_helper import(
|
||||
RoutingServiceHelper)
|
||||
|
||||
|
||||
from neutron.tests import base
|
||||
|
||||
|
||||
_uuid = uuidutils.generate_uuid
|
||||
HOST = 'myhost'
|
||||
FAKE_ID = _uuid()
|
||||
|
||||
|
||||
def prepare_router_data(enable_snat=None, num_internal_ports=1):
|
||||
router_id = _uuid()
|
||||
ex_gw_port = {'id': _uuid(),
|
||||
'network_id': _uuid(),
|
||||
'fixed_ips': [{'ip_address': '19.4.4.4',
|
||||
'subnet_id': _uuid()}],
|
||||
'subnet': {'cidr': '19.4.4.0/24',
|
||||
'gateway_ip': '19.4.4.1'}}
|
||||
int_ports = []
|
||||
for i in range(num_internal_ports):
|
||||
int_ports.append({'id': _uuid(),
|
||||
'network_id': _uuid(),
|
||||
'admin_state_up': True,
|
||||
'fixed_ips': [{'ip_address': '35.4.%s.4' % i,
|
||||
'subnet_id': _uuid()}],
|
||||
'mac_address': 'ca:fe:de:ad:be:ef',
|
||||
'subnet': {'cidr': '35.4.%s.0/24' % i,
|
||||
'gateway_ip': '35.4.%s.1' % i}})
|
||||
hosting_device = {'id': _uuid(),
|
||||
"name": "CSR1kv_template",
|
||||
"booting_time": 300,
|
||||
"host_category": "VM",
|
||||
'management_ip_address': '20.0.0.5',
|
||||
'protocol_port': 22,
|
||||
"credentials": {
|
||||
"username": "user",
|
||||
"password": "4getme"},
|
||||
}
|
||||
router = {
|
||||
'id': router_id,
|
||||
'admin_state_up': True,
|
||||
l3_constants.INTERFACE_KEY: int_ports,
|
||||
'routes': [],
|
||||
'gw_port': ex_gw_port,
|
||||
'hosting_device': hosting_device}
|
||||
if enable_snat is not None:
|
||||
router['enable_snat'] = enable_snat
|
||||
return router, int_ports
|
||||
|
||||
|
||||
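# Typical usage of the helper above, as seen in the tests below:
#
#   router, ports = prepare_router_data(enable_snat=True,
#                                       num_internal_ports=2)
#
# It returns a router dict (with gw_port, interfaces, routes and the
# hosting_device record) together with the list of internal ports.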
class TestRouterInfo(base.BaseTestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestRouterInfo, self).setUp()
|
||||
self.ex_gw_port = {'id': _uuid(),
|
||||
'network_id': _uuid(),
|
||||
'fixed_ips': [{'ip_address': '19.4.4.4',
|
||||
'subnet_id': _uuid()}],
|
||||
'subnet': {'cidr': '19.4.4.0/24',
|
||||
'gateway_ip': '19.4.4.1'}}
|
||||
self.router = {'id': _uuid(),
|
||||
'enable_snat': True,
|
||||
'routes': [],
|
||||
'gw_port': self.ex_gw_port}
|
||||
|
||||
def test_router_info_create(self):
|
||||
router_id = _uuid()
|
||||
fake_router = {}
|
||||
ri = RouterInfo(router_id, fake_router)
|
||||
|
||||
self.assertTrue(ri.router_name().endswith(router_id))
|
||||
|
||||
def test_router_info_create_with_router(self):
|
||||
router_id = _uuid()
|
||||
ri = RouterInfo(router_id, self.router)
|
||||
self.assertTrue(ri.router_name().endswith(router_id))
|
||||
self.assertEqual(ri.router, self.router)
|
||||
self.assertEqual(ri._router, self.router)
|
||||
self.assertTrue(ri.snat_enabled)
|
||||
self.assertIsNone(ri.ex_gw_port)
|
||||
|
||||
def test_router_info_create_snat_disabled(self):
|
||||
router_id = _uuid()
|
||||
self.router['enable_snat'] = False
|
||||
ri = RouterInfo(router_id, self.router)
|
||||
self.assertFalse(ri.snat_enabled)
|
||||
|
||||
|
||||
class TestBasicRoutingOperations(base.BaseTestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(TestBasicRoutingOperations, self).setUp()
|
||||
self.conf = cfg.ConfigOpts()
|
||||
self.conf.register_opts(base_config.core_opts)
|
||||
self.conf.register_opts(cfg_agent.CiscoCfgAgent.OPTS)
|
||||
self.ex_gw_port = {'id': _uuid(),
|
||||
'network_id': _uuid(),
|
||||
'fixed_ips': [{'ip_address': '19.4.4.4',
|
||||
'subnet_id': _uuid()}],
|
||||
'subnet': {'cidr': '19.4.4.0/24',
|
||||
'gateway_ip': '19.4.4.1'}}
|
||||
self.hosting_device = {'id': "100",
|
||||
'name': "CSR1kv_template",
|
||||
'booting_time': 300,
|
||||
'host_category': "VM",
|
||||
'management_ip_address': '20.0.0.5',
|
||||
'protocol_port': 22,
|
||||
'credentials': {'username': 'user',
|
||||
"password": '4getme'},
|
||||
}
|
||||
self.router = {
|
||||
'id': _uuid(),
|
||||
'enable_snat': True,
|
||||
'routes': [],
|
||||
'gw_port': self.ex_gw_port,
|
||||
'hosting_device': self.hosting_device}
|
||||
|
||||
self.agent = mock.Mock()
|
||||
|
||||
#Patches & Mocks
|
||||
|
||||
self.l3pluginApi_cls_p = mock.patch(
|
||||
'neutron.plugins.cisco.cfg_agent.service_helpers.'
|
||||
'routing_svc_helper.CiscoRoutingPluginApi')
|
||||
l3plugin_api_cls = self.l3pluginApi_cls_p.start()
|
||||
self.plugin_api = mock.Mock()
|
||||
l3plugin_api_cls.return_value = self.plugin_api
|
||||
self.plugin_api.get_routers = mock.MagicMock()
|
||||
self.looping_call_p = mock.patch(
|
||||
'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall')
|
||||
self.looping_call_p.start()
|
||||
mock.patch('neutron.common.rpc.create_connection').start()
|
||||
|
||||
self.routing_helper = RoutingServiceHelper(
|
||||
HOST, self.conf, self.agent)
|
||||
self.routing_helper._internal_network_added = mock.Mock()
|
||||
self.routing_helper._external_gateway_added = mock.Mock()
|
||||
self.routing_helper._internal_network_removed = mock.Mock()
|
||||
self.routing_helper._external_gateway_removed = mock.Mock()
|
||||
self.driver = self._mock_driver_and_hosting_device(
|
||||
self.routing_helper)
|
||||
|
||||
def _mock_driver_and_hosting_device(self, svc_helper):
|
||||
svc_helper._dev_status.is_hosting_device_reachable = mock.MagicMock(
|
||||
return_value=True)
|
||||
driver = mock.MagicMock()
|
||||
svc_helper._drivermgr.get_driver = mock.Mock(return_value=driver)
|
||||
svc_helper._drivermgr.set_driver = mock.Mock(return_value=driver)
|
||||
return driver
|
||||
|
||||
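# Note on the helper above: it stubs out the device reachability check and
# the driver manager so that these tests exercise RoutingServiceHelper logic
# against a MagicMock driver, without a real CSR1000v or ncclient session.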
def _reset_mocks(self):
|
||||
self.routing_helper._process_router_floating_ips.reset_mock()
|
||||
self.routing_helper._internal_network_added.reset_mock()
|
||||
self.routing_helper._external_gateway_added.reset_mock()
|
||||
self.routing_helper._internal_network_removed.reset_mock()
|
||||
self.routing_helper._external_gateway_removed.reset_mock()
|
||||
|
||||
def test_process_router_throw_config_error(self):
|
||||
snip_name = 'CREATE_SUBINTERFACE'
|
||||
e_type = 'Fake error'
|
||||
e_tag = 'Fake error tag'
|
||||
params = {'snippet': snip_name, 'type': e_type, 'tag': e_tag}
|
||||
self.routing_helper._internal_network_added.side_effect = (
|
||||
cfg_exceptions.CSR1kvConfigException(**params))
|
||||
router, ports = prepare_router_data()
|
||||
ri = RouterInfo(router['id'], router)
|
||||
self.assertRaises(cfg_exceptions.CSR1kvConfigException,
|
||||
self.routing_helper._process_router, ri)
|
||||
|
||||
def test_process_router(self):
|
||||
router, ports = prepare_router_data()
|
||||
# Set up a mock for the call that processes floating ips
|
||||
self.routing_helper._process_router_floating_ips = mock.Mock()
|
||||
fake_floatingips1 = {'floatingips': [
|
||||
{'id': _uuid(),
|
||||
'floating_ip_address': '8.8.8.8',
|
||||
'fixed_ip_address': '7.7.7.7',
|
||||
'port_id': _uuid()}]}
|
||||
ri = RouterInfo(router['id'], router=router)
|
||||
# Process with initial values
|
||||
self.routing_helper._process_router(ri)
|
||||
ex_gw_port = ri.router.get('gw_port')
|
||||
# Assert that _process_router_floating_ips, _internal_network_added and
# _external_gateway_added were all called with the right params
|
||||
self.routing_helper._process_router_floating_ips.assert_called_with(
|
||||
ri, ex_gw_port)
|
||||
self.routing_helper._internal_network_added.assert_called_with(
|
||||
ri, ports[0], ex_gw_port)
|
||||
self.routing_helper._external_gateway_added.assert_called_with(
|
||||
ri, ex_gw_port)
|
||||
self._reset_mocks()
|
||||
# remap floating IP to a new fixed ip
|
||||
fake_floatingips2 = copy.deepcopy(fake_floatingips1)
|
||||
fake_floatingips2['floatingips'][0]['fixed_ip_address'] = '7.7.7.8'
|
||||
router[l3_constants.FLOATINGIP_KEY] = fake_floatingips2['floatingips']
|
||||
|
||||
# Process again and check that this time only _process_router_floating_ips
# was called.
|
||||
self.routing_helper._process_router(ri)
|
||||
ex_gw_port = ri.router.get('gw_port')
|
||||
self.routing_helper._process_router_floating_ips.assert_called_with(
|
||||
ri, ex_gw_port)
|
||||
self.assertFalse(self.routing_helper._internal_network_added.called)
|
||||
self.assertFalse(self.routing_helper._external_gateway_added.called)
|
||||
self._reset_mocks()
|
||||
# remove just the floating ips
|
||||
del router[l3_constants.FLOATINGIP_KEY]
|
||||
# Process again and check that this time, too, only the floating ip
# processing was invoked (to handle the removal)
|
||||
self.routing_helper._process_router(ri)
|
||||
ex_gw_port = ri.router.get('gw_port')
|
||||
self.routing_helper._process_router_floating_ips.assert_called_with(
|
||||
ri, ex_gw_port)
|
||||
self.assertFalse(self.routing_helper._internal_network_added.called)
|
||||
self.assertFalse(self.routing_helper._external_gateway_added.called)
|
||||
self._reset_mocks()
|
||||
# now no ports so state is torn down
|
||||
del router[l3_constants.INTERFACE_KEY]
|
||||
del router['gw_port']
|
||||
# Update router_info object
|
||||
ri.router = router
|
||||
# Keep a copy of the ex_gw_port before it is gone after processing.
|
||||
ex_gw_port = ri.ex_gw_port
|
||||
# Process the router and verify that the internal and external network
# removal methods were called, while floating ip processing was not
|
||||
self.routing_helper._process_router(ri)
|
||||
self.assertFalse(self.routing_helper.
|
||||
_process_router_floating_ips.called)
|
||||
self.assertFalse(self.routing_helper._external_gateway_added.called)
|
||||
self.assertTrue(self.routing_helper._internal_network_removed.called)
|
||||
self.assertTrue(self.routing_helper._external_gateway_removed.called)
|
||||
self.routing_helper._internal_network_removed.assert_called_with(
|
||||
ri, ports[0], ex_gw_port)
|
||||
self.routing_helper._external_gateway_removed.assert_called_with(
|
||||
ri, ex_gw_port)
|
||||
|
||||
def test_routing_table_update(self):
|
||||
router = self.router
|
||||
fake_route1 = {'destination': '135.207.0.0/16',
|
||||
'nexthop': '1.2.3.4'}
|
||||
fake_route2 = {'destination': '135.207.111.111/32',
|
||||
'nexthop': '1.2.3.4'}
|
||||
|
||||
# First we set the routes to fake_route1 and see if the
|
||||
# driver.routes_updated was called with 'replace'(==add or replace)
|
||||
# and fake_route1
|
||||
router['routes'] = [fake_route1]
|
||||
ri = RouterInfo(router['id'], router)
|
||||
self.routing_helper._process_router(ri)
|
||||
|
||||
self.driver.routes_updated.assert_called_with(ri, 'replace',
|
||||
fake_route1)
|
||||
|
||||
# Now we replace fake_route1 with fake_route2. This should cause the
# driver to be invoked to delete fake_route1 and to 'replace' (== add or
# replace) fake_route2
|
||||
self.driver.reset_mock()
|
||||
router['routes'] = [fake_route2]
|
||||
ri.router = router
|
||||
self.routing_helper._process_router(ri)
|
||||
|
||||
self.driver.routes_updated.assert_called_with(ri, 'delete',
|
||||
fake_route1)
|
||||
self.driver.routes_updated.assert_any_call(ri, 'replace', fake_route2)
|
||||
|
||||
# Now we add back fake_route1 as a new route, this should cause driver
|
||||
# to be invoked to 'replace'(==add or replace) fake_route1
|
||||
self.driver.reset_mock()
|
||||
router['routes'] = [fake_route2, fake_route1]
|
||||
ri.router = router
|
||||
self.routing_helper._process_router(ri)
|
||||
|
||||
self.driver.routes_updated.assert_any_call(ri, 'replace', fake_route1)
|
||||
|
||||
# Now we delete all routes. This should cause driver
|
||||
# to be invoked to delete fake_route1 and fake-route2
|
||||
self.driver.reset_mock()
|
||||
router['routes'] = []
|
||||
ri.router = router
|
||||
self.routing_helper._process_router(ri)
|
||||
|
||||
self.driver.routes_updated.assert_any_call(ri, 'delete', fake_route2)
|
||||
self.driver.routes_updated.assert_any_call(ri, 'delete', fake_route1)
|
||||
|
||||
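    # A minimal sketch (assumed, not the helper's actual code) of the route
    # diffing the assertions above imply: routes only in the new set go to
    # the driver as 'replace', routes only in the old set as 'delete'.
    #
    #   old = {(r['destination'], r['nexthop']) for r in old_routes}
    #   new = {(r['destination'], r['nexthop']) for r in ri.router['routes']}
    #   adds, removes = new - old, old - new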
def test_process_router_internal_network_added_unexpected_error(self):
|
||||
router, ports = prepare_router_data()
|
||||
ri = RouterInfo(router['id'], router=router)
|
||||
# Raise RuntimeError to simulate that an unexpected exception occurs
|
||||
self.routing_helper._internal_network_added.side_effect = RuntimeError
|
||||
self.assertRaises(RuntimeError,
|
||||
self.routing_helper._process_router,
|
||||
ri)
|
||||
self.assertNotIn(
|
||||
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
|
||||
|
||||
# The unexpected exception has been fixed manually
|
||||
self.routing_helper._internal_network_added.side_effect = None
|
||||
|
||||
# The failure will cause a retry next time; now we are able to add the
# port to ri.internal_ports
|
||||
self.routing_helper._process_router(ri)
|
||||
self.assertIn(
|
||||
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
|
||||
|
||||
def test_process_router_internal_network_removed_unexpected_error(self):
|
||||
router, ports = prepare_router_data()
|
||||
ri = RouterInfo(router['id'], router=router)
|
||||
# add an internal port
|
||||
self.routing_helper._process_router(ri)
|
||||
|
||||
# Raise RuntimeError to simulate that an unexpected exception occurs
|
||||
|
||||
self.routing_helper._internal_network_removed.side_effect = mock.Mock(
|
||||
side_effect=RuntimeError)
|
||||
ri.internal_ports[0]['admin_state_up'] = False
|
||||
# The above port is set to down state, remove it.
|
||||
self.assertRaises(RuntimeError,
|
||||
self.routing_helper._process_router,
|
||||
ri)
|
||||
self.assertIn(
|
||||
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
|
||||
|
||||
# The unexpected exception has been fixed manually
|
||||
self.routing_helper._internal_network_removed.side_effect = None
|
||||
|
||||
# The failure will cause a retry next time; process the router again
|
||||
self.routing_helper._process_router(ri)
|
||||
# We were able to remove the port from ri.internal_ports
|
||||
self.assertNotIn(
|
||||
router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
|
||||
|
||||
def test_routers_with_admin_state_down(self):
|
||||
self.plugin_api.get_external_network_id.return_value = None
|
||||
|
||||
routers = [
|
||||
{'id': _uuid(),
|
||||
'admin_state_up': False,
|
||||
'external_gateway_info': {}}]
|
||||
self.routing_helper._process_routers(routers, None)
|
||||
self.assertNotIn(routers[0]['id'], self.routing_helper.router_info)
|
||||
|
||||
def test_router_deleted(self):
|
||||
self.routing_helper.router_deleted(None, [FAKE_ID])
|
||||
self.assertIn(FAKE_ID, self.routing_helper.removed_routers)
|
||||
|
||||
def test_routers_updated(self):
|
||||
self.routing_helper.routers_updated(None, [FAKE_ID])
|
||||
self.assertIn(FAKE_ID, self.routing_helper.updated_routers)
|
||||
|
||||
def test_removed_from_agent(self):
|
||||
self.routing_helper.router_removed_from_agent(None,
|
||||
{'router_id': FAKE_ID})
|
||||
self.assertIn(FAKE_ID, self.routing_helper.removed_routers)
|
||||
|
||||
def test_added_to_agent(self):
|
||||
self.routing_helper.router_added_to_agent(None, [FAKE_ID])
|
||||
self.assertIn(FAKE_ID, self.routing_helper.updated_routers)
|
||||
|
||||
def test_process_router_delete(self):
|
||||
router = self.router
|
||||
router['gw_port'] = self.ex_gw_port
|
||||
self.routing_helper._router_added(router['id'], router)
|
||||
self.assertIn(router['id'], self.routing_helper.router_info)
|
||||
# Now we remove the router
|
||||
self.routing_helper._router_removed(router['id'], deconfigure=True)
|
||||
self.assertNotIn(router['id'], self.routing_helper.router_info)
|
||||
|
||||
def test_collect_state(self):
|
||||
router, ports = prepare_router_data(enable_snat=True,
|
||||
num_internal_ports=2)
|
||||
self.routing_helper._router_added(router['id'], router)
|
||||
|
||||
configurations = {}
|
||||
configurations = self.routing_helper.collect_state(configurations)
|
||||
hd_exp_result = {
|
||||
router['hosting_device']['id']: {'routers': 1}}
|
||||
self.assertEqual(1, configurations['total routers'])
|
||||
self.assertEqual(1, configurations['total ex_gw_ports'])
|
||||
self.assertEqual(2, configurations['total interfaces'])
|
||||
self.assertEqual(0, configurations['total floating_ips'])
|
||||
self.assertEqual(hd_exp_result, configurations['hosting_devices'])
|
||||
self.assertEqual([], configurations['non_responding_hosting_devices'])
|
||||
|
||||
def test_sort_resources_per_hosting_device(self):
|
||||
router1, port = prepare_router_data()
|
||||
router2, port = prepare_router_data()
|
||||
router3, port = prepare_router_data()
|
||||
router4, port = prepare_router_data()
|
||||
|
||||
hd1_id = router1['hosting_device']['id']
|
||||
hd2_id = router4['hosting_device']['id']
|
||||
# Set router2's and router3's hosting device id to router1's device id
|
||||
router2['hosting_device']['id'] = hd1_id
|
||||
router3['hosting_device']['id'] = hd1_id
|
||||
|
||||
resources = {'routers': [router1, router2, router4],
|
||||
'removed_routers': [router3]}
|
||||
devices = self.routing_helper._sort_resources_per_hosting_device(
|
||||
resources)
|
||||
|
||||
self.assertEqual(2, len(devices.keys())) # Two devices
|
||||
hd1_routers = [router1, router2]
|
||||
self.assertEqual(hd1_routers, devices[hd1_id]['routers'])
|
||||
self.assertEqual([router3], devices[hd1_id]['removed_routers'])
|
||||
self.assertEqual([router4], devices[hd2_id]['routers'])
|
||||
|
||||
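    # For reference, the return shape of _sort_resources_per_hosting_device
    # checked above is a dict keyed by hosting device id, e.g.
    #
    #   {hd1_id: {'routers': [router1, router2],
    #             'removed_routers': [router3]},
    #    hd2_id: {'routers': [router4]}}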
def test_get_router_ids_from_removed_devices_info(self):
|
||||
removed_devices_info = {
|
||||
'hosting_data': {'device_1': {'routers': ['id1', 'id2']},
|
||||
'device_2': {'routers': ['id3', 'id4'],
|
||||
'other_key': ['value1', 'value2']}}
|
||||
}
|
||||
resp = self.routing_helper._get_router_ids_from_removed_devices_info(
|
||||
removed_devices_info)
|
||||
self.assertEqual(sorted(resp), sorted(['id1', 'id2', 'id3', 'id4']))
|
||||
|
||||
@mock.patch("eventlet.GreenPool.spawn_n")
|
||||
def test_process_services_full_sync_different_devices(self, mock_spawn):
|
||||
router1, port = prepare_router_data()
|
||||
router2, port = prepare_router_data()
|
||||
self.plugin_api.get_routers = mock.Mock(
|
||||
return_value=[router1, router2])
|
||||
self.routing_helper.process_service()
|
||||
self.assertEqual(2, mock_spawn.call_count)
|
||||
call1 = mock.call(self.routing_helper._process_routers, [router1],
|
||||
None, router1['hosting_device']['id'],
|
||||
all_routers=True)
|
||||
call2 = mock.call(self.routing_helper._process_routers, [router2],
|
||||
None, router2['hosting_device']['id'],
|
||||
all_routers=True)
|
||||
mock_spawn.assert_has_calls([call1, call2], any_order=True)
|
||||
|
||||
@mock.patch("eventlet.GreenPool.spawn_n")
|
||||
def test_process_services_full_sync_same_device(self, mock_spawn):
|
||||
router1, port = prepare_router_data()
|
||||
router2, port = prepare_router_data()
|
||||
router2['hosting_device']['id'] = router1['hosting_device']['id']
|
||||
self.plugin_api.get_routers = mock.Mock(return_value=[router1,
|
||||
router2])
|
||||
self.routing_helper.process_service()
|
||||
self.assertEqual(1, mock_spawn.call_count)
|
||||
mock_spawn.assert_called_with(self.routing_helper._process_routers,
|
||||
[router1, router2],
|
||||
None,
|
||||
router1['hosting_device']['id'],
|
||||
all_routers=True)
|
||||
|
||||
@mock.patch("eventlet.GreenPool.spawn_n")
|
||||
def test_process_services_with_updated_routers(self, mock_spawn):
|
||||
|
||||
router1, port = prepare_router_data()
|
||||
|
||||
def routers_data(context, router_ids=None, hd_ids=None):
|
||||
if router_ids:
|
||||
return [router1]
|
||||
self.plugin_api.get_routers.side_effect = routers_data
|
||||
|
||||
self.routing_helper.fullsync = False
|
||||
self.routing_helper.updated_routers.add(router1['id'])
|
||||
self.routing_helper.process_service()
|
||||
self.assertEqual(1, self.plugin_api.get_routers.call_count)
|
||||
self.plugin_api.get_routers.assert_called_with(
|
||||
self.routing_helper.context,
|
||||
router_ids=[router1['id']])
|
||||
self.assertEqual(1, mock_spawn.call_count)
|
||||
mock_spawn.assert_called_with(self.routing_helper._process_routers,
|
||||
[router1],
|
||||
None,
|
||||
router1['hosting_device']['id'],
|
||||
all_routers=False)
|
||||
|
||||
@mock.patch("eventlet.GreenPool.spawn_n")
|
||||
def test_process_services_with_deviceid(self, mock_spawn):
|
||||
|
||||
router, port = prepare_router_data()
|
||||
device_id = router['hosting_device']['id']
|
||||
|
||||
def routers_data(context, router_ids=None, hd_ids=None):
|
||||
if hd_ids:
|
||||
self.assertEqual([device_id], hd_ids)
|
||||
return [router]
|
||||
|
||||
self.plugin_api.get_routers.side_effect = routers_data
|
||||
self.routing_helper.fullsync = False
|
||||
self.routing_helper.process_service(device_ids=[device_id])
|
||||
self.assertEqual(1, self.plugin_api.get_routers.call_count)
|
||||
self.plugin_api.get_routers.assert_called_with(
|
||||
self.routing_helper.context,
|
||||
hd_ids=[device_id])
|
||||
self.assertEqual(1, mock_spawn.call_count)
|
||||
mock_spawn.assert_called_with(self.routing_helper._process_routers,
|
||||
[router],
|
||||
None,
|
||||
device_id,
|
||||
all_routers=False)
|
||||
|
||||
@mock.patch("eventlet.GreenPool.spawn_n")
|
||||
def test_process_services_with_removed_routers(self, mock_spawn):
|
||||
router, port = prepare_router_data()
|
||||
device_id = router['hosting_device']['id']
|
||||
|
||||
self._mock_driver_and_hosting_device(self.routing_helper)
|
||||
self.routing_helper.fullsync = False
|
||||
# Emulate router added for setting up internal structures
|
||||
self.routing_helper._router_added(router['id'], router)
|
||||
# Add router to removed routers list and process it
|
||||
self.routing_helper.removed_routers.add(router['id'])
|
||||
self.routing_helper.process_service()
|
||||
|
||||
self.assertEqual(1, mock_spawn.call_count)
|
||||
mock_spawn.assert_called_with(self.routing_helper._process_routers,
|
||||
None,
|
||||
[router],
|
||||
device_id,
|
||||
all_routers=False)
|
||||
|
||||
@mock.patch("eventlet.GreenPool.spawn_n")
|
||||
def test_process_services_with_removed_routers_info(self, mock_spawn):
|
||||
router1, port = prepare_router_data()
|
||||
device_id = router1['hosting_device']['id']
|
||||
router2, port = prepare_router_data()
|
||||
router2['hosting_device']['id'] = _uuid()
|
||||
|
||||
removed_devices_info = {
|
||||
'hosting_data': {device_id: {'routers': [router1['id']]}},
|
||||
'deconfigure': True
|
||||
}
|
||||
|
||||
self._mock_driver_and_hosting_device(self.routing_helper)
|
||||
self.routing_helper.fullsync = False
|
||||
# Emulate router added for setting up internal structures
|
||||
self.routing_helper._router_added(router1['id'], router1)
|
||||
self.routing_helper._router_added(router2['id'], router2)
|
||||
# Add router to removed routers list and process it
|
||||
self.routing_helper.removed_routers.add(router2['id'])
|
||||
self.routing_helper.process_service(
|
||||
removed_devices_info=removed_devices_info)
|
||||
|
||||
self.assertEqual(2, mock_spawn.call_count)
|
||||
call1 = mock.call(self.routing_helper._process_routers,
|
||||
None,
|
||||
[router1],
|
||||
router1['hosting_device']['id'],
|
||||
all_routers=False)
|
||||
call2 = mock.call(self.routing_helper._process_routers,
|
||||
None,
|
||||
[router2],
|
||||
router2['hosting_device']['id'],
|
||||
all_routers=False)
|
||||
mock_spawn.assert_has_calls([call1, call2], any_order=True)
|
||||
|
||||
@mock.patch("eventlet.GreenPool.spawn_n")
|
||||
def test_process_services_with_rpc_error(self, mock_spawn):
|
||||
router, port = prepare_router_data()
|
||||
get_routers = self.plugin_api.get_routers
|
||||
get_routers.side_effect = oslo_messaging.MessagingException
|
||||
self.routing_helper.fullsync = False
|
||||
self.routing_helper.updated_routers.add(router['id'])
|
||||
self.routing_helper.process_service()
|
||||
self.assertEqual(1, get_routers.call_count)
|
||||
get_routers.assert_called_with(
|
||||
self.routing_helper.context,
|
||||
router_ids=[router['id']])
|
||||
self.assertFalse(mock_spawn.called)
|
||||
self.assertTrue(self.routing_helper.fullsync)
|
||||
|
||||
def test_process_routers(self):
|
||||
router, port = prepare_router_data()
|
||||
driver = self._mock_driver_and_hosting_device(self.routing_helper)
|
||||
self.routing_helper._process_router = mock.Mock()
|
||||
self.routing_helper._process_routers([router], None)
|
||||
ri = self.routing_helper.router_info[router['id']]
|
||||
driver.router_added.assert_called_with(ri)
|
||||
self.routing_helper._process_router.assert_called_with(ri)
|
||||
|
||||
def _process_routers_floatingips(self, action='add'):
|
||||
router, port = prepare_router_data()
|
||||
driver = self._mock_driver_and_hosting_device(self.routing_helper)
|
||||
ex_gw_port = router['gw_port']
|
||||
floating_ip_address = '19.4.4.10'
|
||||
fixed_ip_address = '35.4.1.10'
|
||||
fixed_ip_address_2 = '35.4.1.15'
|
||||
port_id = 'fake_port_id'
|
||||
floating_ip = {'fixed_ip_address': fixed_ip_address,
|
||||
'floating_ip_address': floating_ip_address,
|
||||
'id': 'floating_ip_id',
|
||||
'port_id': port_id,
|
||||
'status': 'ACTIVE', }
|
||||
router[l3_constants.FLOATINGIP_KEY] = [floating_ip]
|
||||
ri = RouterInfo(router['id'], router=router)
|
||||
|
||||
# Default add action
|
||||
self.routing_helper._process_router_floating_ips(ri, ex_gw_port)
|
||||
driver.floating_ip_added.assert_called_with(
|
||||
ri, ex_gw_port, floating_ip_address, fixed_ip_address)
|
||||
|
||||
if action == 'remove':
|
||||
router[l3_constants.FLOATINGIP_KEY] = []
|
||||
self.routing_helper._process_router_floating_ips(ri, ex_gw_port)
|
||||
driver.floating_ip_removed.assert_called_with(
|
||||
ri, ri.ex_gw_port, floating_ip_address, fixed_ip_address)
|
||||
|
||||
if action == 'remap':
|
||||
driver.reset_mock()
|
||||
floating_ip_2 = copy.deepcopy(floating_ip)
|
||||
floating_ip_2['fixed_ip_address'] = fixed_ip_address_2
|
||||
ri.router[l3_constants.FLOATINGIP_KEY] = [floating_ip_2]
|
||||
|
||||
self.routing_helper._process_router_floating_ips(ri, ex_gw_port)
|
||||
driver.floating_ip_added.assert_called_with(
|
||||
ri, ri.ex_gw_port, floating_ip_address, fixed_ip_address_2)
|
||||
|
||||
driver.floating_ip_removed.assert_called_with(
|
||||
ri, ri.ex_gw_port, floating_ip_address, fixed_ip_address)
|
||||
|
||||
def test_process_routers_floatingips_add(self):
|
||||
self._process_routers_floatingips(action="add")
|
||||
|
||||
def test_process_routers_floatingips_remove(self):
|
||||
self._process_routers_floatingips(action="remove")
|
||||
|
||||
def test_process_routers_floatingips_remap(self):
|
||||
self._process_routers_floatingips(action="remap")
|
@ -1,151 +0,0 @@
|
||||
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
from novaclient import exceptions as nova_exc
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import excutils
|
||||
|
||||
from neutron import context as n_context
|
||||
from neutron.i18n import _LE
|
||||
from neutron import manager
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.openstack.common import uuidutils
|
||||
from neutron.plugins.common import constants
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
_uuid = uuidutils.generate_uuid
|
||||
|
||||
|
||||
class DeviceHandlingTestSupportMixin(object):
|
||||
|
||||
@property
|
||||
def _core_plugin(self):
|
||||
return manager.NeutronManager.get_plugin()
|
||||
|
||||
def _mock_l3_admin_tenant(self):
|
||||
# Mock l3 admin tenant
|
||||
self.tenant_id_fcn_p = mock.patch(
|
||||
'neutron.plugins.cisco.db.l3.device_handling_db.'
|
||||
'DeviceHandlingMixin.l3_tenant_id')
|
||||
self.tenant_id_fcn = self.tenant_id_fcn_p.start()
|
||||
self.tenant_id_fcn.return_value = "L3AdminTenantId"
|
||||
|
||||
def _create_mgmt_nw_for_tests(self, fmt):
|
||||
self._mgmt_nw = self._make_network(fmt,
|
||||
cfg.CONF.general.management_network,
|
||||
True, tenant_id="L3AdminTenantId",
|
||||
shared=False)
|
||||
self._mgmt_subnet = self._make_subnet(fmt, self._mgmt_nw,
|
||||
"10.0.100.1", "10.0.100.0/24",
|
||||
ip_version=4)
|
||||
|
||||
def _remove_mgmt_nw_for_tests(self):
|
||||
q_p = "network_id=%s" % self._mgmt_nw['network']['id']
|
||||
subnets = self._list('subnets', query_params=q_p)
|
||||
if subnets:
|
||||
for p in self._list('ports', query_params=q_p).get('ports'):
|
||||
self._delete('ports', p['id'])
|
||||
self._delete('subnets', self._mgmt_subnet['subnet']['id'])
|
||||
self._delete('networks', self._mgmt_nw['network']['id'])
|
||||
|
||||
# Function used to mock novaclient services list
|
||||
def _novaclient_services_list(self, all=True):
|
||||
services = set(['nova-conductor', 'nova-cert', 'nova-scheduler',
|
||||
'nova-compute', 'nova-consoleauth'])
|
||||
full_list = [FakeResource(binary=res) for res in services]
|
||||
_all = all
|
||||
|
||||
def response():
|
||||
if _all:
|
||||
return full_list
|
||||
else:
|
||||
return full_list[2:]
|
||||
return response
|
||||
|
||||
# Function used to mock novaclient servers create
|
||||
def _novaclient_servers_create(self, instance_name, image_id, flavor_id,
|
||||
nics, files, config_drive):
|
||||
fake_vm = FakeResource()
|
||||
for nic in nics:
|
||||
p_dict = {'port': {'device_id': fake_vm.id,
|
||||
'device_owner': 'nova'}}
|
||||
self._core_plugin.update_port(n_context.get_admin_context(),
|
||||
nic['port-id'], p_dict)
|
||||
return fake_vm
|
||||
|
||||
# Function used to mock novaclient servers delete
|
||||
def _novaclient_servers_delete(self, vm_id):
|
||||
q_p = "device_id=%s" % vm_id
|
||||
ports = self._list('ports', query_params=q_p)
|
||||
for port in ports.get('ports', []):
|
||||
try:
|
||||
self._delete('ports', port['id'])
|
||||
except Exception as e:
|
||||
with excutils.save_and_reraise_exception(reraise=False):
|
||||
LOG.error(_LE('Failed to delete port %(p_id)s for vm '
|
||||
'instance %(v_id)s due to %(err)s'),
|
||||
{'p_id': port['id'], 'v_id': vm_id, 'err': e})
|
||||
raise nova_exc.InternalServerError()
|
||||
|
||||
def _mock_svc_vm_create_delete(self, plugin):
|
||||
# Mock novaclient methods for creation/deletion of service VMs
|
||||
mock.patch(
|
||||
'neutron.plugins.cisco.l3.service_vm_lib.n_utils.find_resource',
|
||||
lambda *args, **kw: FakeResource()).start()
|
||||
self._nclient_services_mock = mock.MagicMock()
|
||||
self._nclient_services_mock.list = self._novaclient_services_list()
|
||||
mock.patch.object(plugin._svc_vm_mgr._nclient, 'services',
|
||||
self._nclient_services_mock).start()
|
||||
nclient_servers_mock = mock.MagicMock()
|
||||
nclient_servers_mock.create = self._novaclient_servers_create
|
||||
nclient_servers_mock.delete = self._novaclient_servers_delete
|
||||
mock.patch.object(plugin._svc_vm_mgr._nclient, 'servers',
|
||||
nclient_servers_mock).start()
|
||||
|
||||
def _mock_io_file_ops(self):
|
||||
# Mock library functions for config drive file operations
|
||||
cfg_template = '\n'.join(['interface GigabitEthernet1',
|
||||
'ip address <ip> <mask>',
|
||||
'no shutdown'])
|
||||
m = mock.mock_open(read_data=cfg_template)
|
||||
m.return_value.__iter__.return_value = cfg_template.splitlines()
|
||||
mock.patch('neutron.plugins.cisco.l3.hosting_device_drivers.'
|
||||
'csr1kv_hd_driver.open', m, create=True).start()
|
||||
|
||||
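    # The template above stands in for the CSR1kv config-drive bootstrap
    # file; the <ip> and <mask> placeholders are presumably substituted by
    # the hosting device driver before the file is attached to the VM.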
def _test_remove_all_hosting_devices(self):
|
||||
"""Removes all hosting devices created during a test."""
|
||||
plugin = manager.NeutronManager.get_service_plugins()[
|
||||
constants.L3_ROUTER_NAT]
|
||||
context = n_context.get_admin_context()
|
||||
plugin.delete_all_hosting_devices(context, True)
|
||||
|
||||
def _get_fake_resource(self, tenant_id=None, id=None):
|
||||
return {'id': id or _uuid(),
|
||||
'tenant_id': tenant_id or _uuid()}
|
||||
|
||||
def _get_test_context(self, user_id=None, tenant_id=None, is_admin=False):
|
||||
return n_context.Context(user_id, tenant_id, is_admin,
|
||||
load_admin_roles=True)
|
||||
|
||||
|
||||
# Used to fake Glance images, Nova VMs and Nova services
|
||||
class FakeResource(object):
|
||||
def __init__(self, id=None, enabled='enabled', state='up', binary=None):
|
||||
self.id = id or _uuid()
|
||||
self.status = enabled
|
||||
self.state = state
|
||||
self.binary = binary
|
@ -1,352 +0,0 @@
|
||||
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import timeutils
|
||||
from webob import exc
|
||||
|
||||
import neutron
|
||||
from neutron.api.v2 import attributes
|
||||
from neutron import context as n_context
|
||||
from neutron.db import agents_db
|
||||
from neutron.db import common_db_mixin
|
||||
from neutron.extensions import providernet as pnet
|
||||
from neutron import manager
|
||||
from neutron.plugins.cisco.common import cisco_constants as c_constants
|
||||
from neutron.plugins.cisco.db.l3 import device_handling_db
|
||||
from neutron.plugins.cisco.db.l3 import l3_router_appliance_db
|
||||
from neutron.plugins.cisco.l3.rpc import devices_cfgagent_rpc_cb
|
||||
from neutron.plugins.cisco.l3.rpc import l3_router_cfgagent_rpc_cb
|
||||
from neutron.plugins.cisco.l3 import service_vm_lib
|
||||
from neutron.plugins.common import constants as service_constants
|
||||
from neutron.tests.unit.cisco.l3 import device_handling_test_support
|
||||
from neutron.tests.unit import test_db_plugin
|
||||
from neutron.tests.unit import test_extension_extraroute as test_ext_extraroute
|
||||
from neutron.tests.unit import test_l3_plugin
|
||||
from neutron.tests.unit import testlib_plugin
|
||||
|
||||
|
||||
CORE_PLUGIN_KLASS = ('neutron.tests.unit.cisco.l3.'
|
||||
'test_l3_router_appliance_plugin.TestNoL3NatPlugin')
|
||||
L3_PLUGIN_KLASS = (
|
||||
"neutron.tests.unit.cisco.l3.test_l3_router_appliance_plugin."
|
||||
"TestApplianceL3RouterServicePlugin")
|
||||
extensions_path = neutron.plugins.__path__[0] + '/cisco/extensions'
|
||||
|
||||
|
||||
class L3RouterApplianceTestExtensionManager(
|
||||
test_ext_extraroute.ExtraRouteTestExtensionManager):
|
||||
|
||||
def get_actions(self):
|
||||
return []
|
||||
|
||||
def get_request_extensions(self):
|
||||
return []
|
||||
|
||||
def get_extended_resources(self, version):
|
||||
return pnet.get_extended_resources(version)
|
||||
|
||||
|
||||
class TestNoL3NatPlugin(test_l3_plugin.TestNoL3NatPlugin,
|
||||
agents_db.AgentDbMixin):
|
||||
|
||||
# There is no need to expose the agent REST API
|
||||
supported_extension_aliases = ["external-net", "provider"]
|
||||
NET_TYPE = 'vlan'
|
||||
|
||||
def __init__(self):
|
||||
self.tags = {}
|
||||
self.tag = 1
|
||||
super(TestNoL3NatPlugin, self).__init__()
|
||||
|
||||
def _make_network_dict(self, network, fields=None,
|
||||
process_extensions=True):
|
||||
res = {'id': network['id'],
|
||||
'name': network['name'],
|
||||
'tenant_id': network['tenant_id'],
|
||||
'admin_state_up': network['admin_state_up'],
|
||||
'status': network['status'],
|
||||
'shared': network['shared'],
|
||||
'subnets': [subnet['id']
|
||||
for subnet in network['subnets']]}
|
||||
try:
|
||||
tag = self.tags[network['id']]
|
||||
except KeyError:
|
||||
self.tag += 1
|
||||
tag = self.tag
|
||||
self.tags[network['id']] = tag
|
||||
res.update({pnet.PHYSICAL_NETWORK: 'phy',
|
||||
pnet.NETWORK_TYPE: self.NET_TYPE,
|
||||
pnet.SEGMENTATION_ID: tag})
|
||||
# Call auxiliary extend functions, if any
|
||||
if process_extensions:
|
||||
self._apply_dict_extend_functions(
|
||||
attributes.NETWORKS, res, network)
|
||||
return self._fields(res, fields)
|
||||
|
||||
def get_network_profiles(self, context, filters=None, fields=None):
|
||||
return [{'id': "1234"}]
|
||||
|
||||
def get_policy_profiles(self, context, filters=None, fields=None):
|
||||
return [{'id': "4321"}]
|
||||
|
||||
|
||||
# An extra-route-capable L3 routing service plugin class supporting appliances
|
||||
class TestApplianceL3RouterServicePlugin(
|
||||
agents_db.AgentDbMixin, common_db_mixin.CommonDbMixin,
|
||||
device_handling_db.DeviceHandlingMixin,
|
||||
l3_router_appliance_db.L3RouterApplianceDBMixin):
|
||||
|
||||
supported_extension_aliases = ["router", "extraroute"]
|
||||
|
||||
def __init__(self):
|
||||
self._setup_backlog_handling()
|
||||
self._svc_vm_mgr = service_vm_lib.ServiceVMManager()
|
||||
super(TestApplianceL3RouterServicePlugin, self).__init__()
|
||||
|
||||
def get_plugin_type(self):
|
||||
return service_constants.L3_ROUTER_NAT
|
||||
|
||||
def get_plugin_description(self):
|
||||
return "L3 Routing Service Plugin for testing"
|
||||
|
||||
|
||||
class L3RouterApplianceTestCaseBase(
|
||||
test_db_plugin.NeutronDbPluginV2TestCase,
|
||||
testlib_plugin.NotificationSetupHelper,
|
||||
device_handling_test_support.DeviceHandlingTestSupportMixin):
|
||||
|
||||
def setUp(self, core_plugin=None, l3_plugin=None, ext_mgr=None):
|
||||
# Save the global RESOURCE_ATTRIBUTE_MAP
|
||||
self.saved_attr_map = {}
|
||||
for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
|
||||
self.saved_attr_map[resource] = attrs.copy()
|
||||
if not core_plugin:
|
||||
core_plugin = CORE_PLUGIN_KLASS
|
||||
if l3_plugin is None:
|
||||
l3_plugin = L3_PLUGIN_KLASS
|
||||
service_plugins = {'l3_plugin_name': l3_plugin}
|
||||
cfg.CONF.set_override('api_extensions_path', extensions_path)
|
||||
|
||||
# for these tests we need to enable overlapping ips
|
||||
cfg.CONF.set_default('allow_overlapping_ips', True)
|
||||
cfg.CONF.set_default('max_routes', 3)
|
||||
if ext_mgr is None:
|
||||
ext_mgr = L3RouterApplianceTestExtensionManager()
|
||||
|
||||
super(L3RouterApplianceTestCaseBase, self).setUp(
|
||||
plugin=core_plugin, service_plugins=service_plugins,
|
||||
ext_mgr=ext_mgr)
|
||||
|
||||
self.core_plugin = manager.NeutronManager.get_plugin()
|
||||
self.plugin = manager.NeutronManager.get_service_plugins().get(
|
||||
service_constants.L3_ROUTER_NAT)
|
||||
|
||||
self.setup_notification_driver()
|
||||
|
||||
cfg.CONF.set_override('allow_sorting', True)
|
||||
test_opts = [
|
||||
cfg.StrOpt('auth_uri', default='http://localhost:35357/v2.0/'),
|
||||
cfg.StrOpt('identity_uri', default='http://localhost:5000'),
|
||||
cfg.StrOpt('admin_user', default='neutron'),
|
||||
cfg.StrOpt('admin_password', default='secrete')]
|
||||
cfg.CONF.register_opts(test_opts, 'keystone_authtoken')
|
||||
|
||||
self._mock_l3_admin_tenant()
|
||||
self._create_mgmt_nw_for_tests(self.fmt)
|
||||
self._mock_svc_vm_create_delete(self.plugin)
|
||||
self._mock_io_file_ops()
|
||||
|
||||
def restore_attribute_map(self):
|
||||
# Restore the original RESOURCE_ATTRIBUTE_MAP
|
||||
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
|
||||
|
||||
def tearDown(self):
|
||||
self._remove_mgmt_nw_for_tests()
|
||||
(neutron.tests.unit.cisco.l3.test_l3_router_appliance_plugin.
|
||||
TestApplianceL3RouterServicePlugin._mgmt_nw_uuid) = None
|
||||
(neutron.tests.unit.cisco.l3.test_l3_router_appliance_plugin.
|
||||
TestApplianceL3RouterServicePlugin._refresh_router_backlog) = True
|
||||
(neutron.tests.unit.cisco.l3.test_l3_router_appliance_plugin.
|
||||
TestApplianceL3RouterServicePlugin._nova_running) = False
|
||||
plugin = manager.NeutronManager.get_service_plugins()[
|
||||
service_constants.L3_ROUTER_NAT]
|
||||
plugin._heartbeat.stop()
|
||||
self.restore_attribute_map()
|
||||
super(L3RouterApplianceTestCaseBase, self).tearDown()
|
||||
|
||||
|
||||
class L3RouterApplianceVMTestCase(
|
||||
L3RouterApplianceTestCaseBase, test_l3_plugin.L3NatTestCaseBase,
|
||||
test_ext_extraroute.ExtraRouteDBTestCaseBase):
|
||||
|
||||
def setUp(self, core_plugin=None, l3_plugin=None, dm_plugin=None,
|
||||
ext_mgr=None):
|
||||
super(L3RouterApplianceVMTestCase, self).setUp(
|
||||
core_plugin=core_plugin, l3_plugin=l3_plugin, ext_mgr=ext_mgr)
|
||||
|
||||
def test_floatingip_with_assoc_fails(self):
|
||||
self._test_floatingip_with_assoc_fails(
|
||||
'neutron.db.l3_db.L3_NAT_dbonly_mixin._check_and_get_fip_assoc')
|
||||
|
||||
|
||||
class CfgAgentRouterApplianceVMTestCase(L3RouterApplianceTestCaseBase,
|
||||
test_l3_plugin.L3AgentDbTestCaseBase):
|
||||
|
||||
def setUp(self, core_plugin=None, l3_plugin=None, ext_mgr=None):
|
||||
super(CfgAgentRouterApplianceVMTestCase, self).setUp(
|
||||
core_plugin=core_plugin, l3_plugin=l3_plugin, ext_mgr=ext_mgr)
|
||||
# Rewire function name so we can use existing l3 agent tests
|
||||
# to test the cfg agent rpc.
|
||||
self.plugin.get_sync_data = self.plugin.get_sync_data_ext
|
||||
|
||||
def _test_notify_op_agent(self, target_func, *args):
|
||||
l3_rpc_agent_api_str = (
|
||||
'neutron.plugins.cisco.l3.rpc.l3_router_rpc_joint_agent_api'
|
||||
'.L3RouterJointAgentNotifyAPI')
|
||||
plugin = manager.NeutronManager.get_service_plugins()[
|
||||
service_constants.L3_ROUTER_NAT]
|
||||
oldNotify = plugin.l3_cfg_rpc_notifier
|
||||
try:
|
||||
with mock.patch(l3_rpc_agent_api_str) as notifyApi:
|
||||
plugin.l3_cfg_rpc_notifier = notifyApi
|
||||
kargs = [item for item in args]
|
||||
kargs.append(notifyApi)
|
||||
target_func(*kargs)
|
||||
except Exception:
|
||||
plugin.l3_cfg_rpc_notifier = oldNotify
|
||||
raise
|
||||
else:
|
||||
plugin.l3_cfg_rpc_notifier = oldNotify
|
||||
|
||||
|
||||
DB_PLUGIN_KLASS = ('neutron.tests.unit.cisco.l3.ovs_neutron_plugin.'
|
||||
'OVSNeutronPluginV2')
|
||||
|
||||
HOST = 'my_cfgagent_host'
|
||||
FIRST_CFG_AGENT = {
|
||||
'binary': 'neutron-cisco-cfg-agent',
|
||||
'host': HOST,
|
||||
'topic': c_constants.CFG_AGENT,
|
||||
'configurations': {},
|
||||
'agent_type': c_constants.AGENT_TYPE_CFG,
|
||||
'start_flag': True
|
||||
}
|
||||
|
||||
|
||||
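# FIRST_CFG_AGENT is the agent_state payload that _register_cfg_agent() below
# feeds to AgentExtRpcCallback.report_state(), registering a Cisco cfg agent
# on HOST so that hosting devices can subsequently be bound to it.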
class RouterSchedulingTestCase(L3RouterApplianceTestCaseBase,
|
||||
test_l3_plugin.L3NatTestCaseMixin):
|
||||
|
||||
def setUp(self):
|
||||
super(RouterSchedulingTestCase, self).setUp()
|
||||
self.adminContext = n_context.get_admin_context()
|
||||
|
||||
def _register_cfg_agent(self):
|
||||
callback = agents_db.AgentExtRpcCallback()
|
||||
callback.report_state(self.adminContext,
|
||||
agent_state={'agent_state': FIRST_CFG_AGENT},
|
||||
time=timeutils.strtime())
|
||||
agent_db = self.core_plugin.get_agents_db(self.adminContext,
|
||||
filters={'host': [HOST]})
|
||||
self.agent_id1 = agent_db[0].id
|
||||
|
||||
def _update_router_name(self, router_id, new_name='new_name'):
|
||||
return self._update('routers', router_id,
|
||||
{'router': {'name': new_name}},
|
||||
expected_code=exc.HTTPOk.code)
|
||||
|
||||
def test_router_scheduled_to_device_with_no_cfg_agent(self):
|
||||
with self.router() as router:
|
||||
r_id = router['router']['id']
|
||||
self._update_router_name(r_id)
|
||||
routers = self.plugin.get_sync_data_ext(self.adminContext,
|
||||
[r_id])
|
||||
self.assertEqual(1, len(routers))
|
||||
hosting_device = routers[0]['hosting_device']
|
||||
self.assertIsNotNone(hosting_device)
|
||||
self.assertIsNone(hosting_device['cfg_agent_id'])
|
||||
|
||||
def test_router_not_scheduled_to_device_without_nova_services(self):
|
||||
self._nclient_services_mock.list = self._novaclient_services_list(
|
||||
False)
|
||||
with self.router() as router:
|
||||
r_id = router['router']['id']
|
||||
self._update_router_name(r_id)
|
||||
routers = self.plugin.get_sync_data_ext(self.adminContext,
|
||||
[r_id])
|
||||
self.assertEqual(1, len(routers))
|
||||
hosting_device = routers[0]['hosting_device']
|
||||
self.assertIsNone(hosting_device)
|
||||
|
||||
def test_router_scheduled_to_device_and_cfg_agent(self):
|
||||
self._register_cfg_agent()
|
||||
cfg_rpc = l3_router_cfgagent_rpc_cb.L3RouterCfgRpcCallbackMixin()
|
||||
cfg_rpc._core_plugin = self.core_plugin
|
||||
cfg_rpc._l3plugin = self.plugin
|
||||
with self.router() as router:
|
||||
r_id = router['router']['id']
|
||||
self._update_router_name(r_id)
|
||||
routers = cfg_rpc.cfg_sync_routers(
|
||||
self.adminContext, host=HOST)
|
||||
self.assertEqual(1, len(routers))
|
||||
hosting_device = routers[0]['hosting_device']
|
||||
self.assertIsNotNone(hosting_device)
|
||||
self.assertIsNotNone(hosting_device['cfg_agent_id'])
|
||||
|
||||
def test_dead_device_is_removed(self):
|
||||
cfg_dh_rpc = devices_cfgagent_rpc_cb.DeviceCfgRpcCallbackMixin()
|
||||
cfg_dh_rpc._l3plugin = self.plugin
|
||||
with mock.patch(
|
||||
'neutron.plugins.cisco.l3.rpc.l3_router_rpc_joint_agent_api.'
|
||||
'L3RouterJointAgentNotifyAPI.hosting_devices_removed') as (
|
||||
mock_notify):
|
||||
with self.router() as router:
|
||||
r_id = router['router']['id']
|
||||
routers_1 = self.plugin.get_sync_data_ext(self.adminContext,
|
||||
[r_id])
|
||||
self.assertEqual(1, len(routers_1))
|
||||
hosting_device_1 = routers_1[0]['hosting_device']
|
||||
self.assertIsNotNone(hosting_device_1)
|
||||
cfg_dh_rpc.report_non_responding_hosting_devices(
|
||||
self.adminContext,
|
||||
host=None,
|
||||
hosting_device_ids=[hosting_device_1['id']])
|
||||
self.assertEqual(1, mock_notify.call_count)
|
||||
mock_notify.assert_called_with(
|
||||
mock.ANY,
|
||||
{hosting_device_1['id']: {'routers': [r_id]}},
|
||||
False,
|
||||
mock.ANY)
|
||||
|
||||
def test_cfg_agent_registration_triggers_autoscheduling(self):
|
||||
with self.router() as router:
|
||||
r_id = router['router']['id']
|
||||
routers_1 = self.plugin.get_sync_data_ext(self.adminContext,
|
||||
[r_id])
|
||||
self.assertEqual(1, len(routers_1))
|
||||
hosting_device_1 = routers_1[0]['hosting_device']
|
||||
self.assertIsNotNone(hosting_device_1)
|
||||
self.assertIsNone(hosting_device_1['cfg_agent_id'])
|
||||
cfg_dh_rpc = devices_cfgagent_rpc_cb.DeviceCfgRpcCallbackMixin()
|
||||
cfg_dh_rpc._l3plugin = self.plugin
|
||||
self._register_cfg_agent()
|
||||
res = cfg_dh_rpc.register_for_duty(self.adminContext, host=HOST)
|
||||
self.assertTrue(res)
|
||||
routers_2 = self.plugin.get_sync_data_ext(self.adminContext,
|
||||
[r_id])
|
||||
self.assertEqual(1, len(routers_2))
|
||||
hosting_device_2 = routers_2[0]['hosting_device']
|
||||
self.assertIsNotNone(hosting_device_2)
|
||||
self.assertIsNotNone(hosting_device_2['cfg_agent_id'])
|
@ -88,7 +88,7 @@ setup-hooks =

 [entry_points]
 console_scripts =
-    neutron-cisco-cfg-agent = neutron.plugins.cisco.cfg_agent.cfg_agent:main
+    neutron-cisco-cfg-agent = networking_cisco.plugins.cisco.cfg_agent.cfg_agent:main
     neutron-db-manage = neutron.db.migration.cli:main
     neutron-debug = neutron.debug.shell:main
     neutron-dhcp-agent = neutron.cmd.eventlet.agents.dhcp:main