Decompose the VMware plugin
This patch removes all the business logic pertaining to the VMware plugin. The following modules are left in openstack/neutron: - plugin-specific API extension declarations - database models (and a module with constants they use) - integration module pointing to the external repository Change-Id: I8a01a977889b36015a9cfa900173c05bfd516457 Partially-Implements: blueprint core-vendor-decomposition
This commit is contained in:
parent
7cd356964c
commit
56bc2e093e
@ -1,29 +0,0 @@
|
||||
# Copyright 2012 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
import httplib
|
||||
|
||||
|
||||
def ctrl_conn_to_str(conn):
    """Returns a string representing a connection URL to the controller."""
    # NOTE: HTTPSConnection subclasses HTTPConnection, so the https check
    # must come first.
    if isinstance(conn, httplib.HTTPSConnection):
        scheme = "https://"
    elif isinstance(conn, httplib.HTTPConnection):
        scheme = "http://"
    else:
        raise TypeError(_('Invalid connection type: %s') % type(conn))
    return "%s%s:%s" % (scheme, conn.host, conn.port)
|
@ -1,247 +0,0 @@
|
||||
# Copyright 2012 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import abc
|
||||
import httplib
|
||||
import six
|
||||
import time
|
||||
|
||||
from oslo_config import cfg
|
||||
|
||||
from neutron.i18n import _LE, _LI, _LW
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware import api_client
|
||||
|
||||
LOG = logging.getLogger(__name__)

# -1 disables expiry of the stored cluster generation ID
# (see ApiClientBase.config_gen below).
GENERATION_ID_TIMEOUT = -1
# Default number of pooled connections kept per API provider.
DEFAULT_CONCURRENT_CONNECTIONS = 3
# Default TCP connect timeout, in seconds.
DEFAULT_CONNECT_TIMEOUT = 5
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
class ApiClientBase(object):
    """An abstract baseclass for all API client implementations."""

    def _create_connection(self, host, port, is_ssl):
        """Return a new HTTP(S) connection to host:port."""
        if is_ssl:
            return httplib.HTTPSConnection(host, port,
                                           timeout=self._connect_timeout)
        return httplib.HTTPConnection(host, port,
                                      timeout=self._connect_timeout)

    @staticmethod
    def _conn_params(http_conn):
        """Return the (host, port, is_ssl) tuple identifying a connection."""
        is_ssl = isinstance(http_conn, httplib.HTTPSConnection)
        return (http_conn.host, http_conn.port, is_ssl)

    @property
    def user(self):
        # Login user name configured for this client.
        return self._user

    @property
    def password(self):
        # Login password configured for this client.
        return self._password

    @property
    def config_gen(self):
        # If NSX_gen_timeout is not -1 then:
        # Maintain a timestamp along with the generation ID.  Hold onto the
        # ID long enough to be useful and block on sequential requests but
        # not long enough to persist when Onix db is cleared, which resets
        # the generation ID, causing the DAL to block indefinitely with some
        # number that's higher than the cluster's value.
        if self._gen_timeout != -1:
            ts = self._config_gen_ts
            if ts is not None:
                if (time.time() - ts) > self._gen_timeout:
                    # Generation ID expired; report "unknown".
                    return None
        return self._config_gen

    @config_gen.setter
    def config_gen(self, value):
        # Refresh the timestamp only when the generation ID actually changes.
        if self._config_gen != value:
            if self._gen_timeout != -1:
                self._config_gen_ts = time.time()
        self._config_gen = value

    def auth_cookie(self, conn):
        """Return the stored session cookie for conn's provider, or None."""
        cookie = None
        data = self._get_provider_data(conn)
        if data:
            # Provider data is a (semaphore, session_cookie) tuple.
            cookie = data[1]
        return cookie

    def set_auth_cookie(self, conn, cookie):
        """Store a session cookie for conn's provider."""
        data = self._get_provider_data(conn)
        if data:
            self._set_provider_data(conn, (data[0], cookie))

    def acquire_connection(self, auto_login=True, headers=None, rid=-1):
        '''Check out an available HTTPConnection instance.

        Blocks until a connection is available.
        :auto_login: automatically logins before returning conn
        :headers: header to pass on to login attempt
        :param rid: request id passed in from request eventlet.
        :returns: An available HTTPConnection instance or None if no
            api_providers are configured.
        '''
        if not self._api_providers:
            LOG.warn(_LW("[%d] no API providers currently available."), rid)
            return None
        if self._conn_pool.empty():
            LOG.debug("[%d] Waiting to acquire API client connection.", rid)
        priority, conn = self._conn_pool.get()
        now = time.time()
        # Reconnect if the connection has been sitting idle longer than the
        # configured idle timeout (the server may have dropped it).
        if getattr(conn, 'last_used', now) < now - cfg.CONF.conn_idle_timeout:
            LOG.info(_LI("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f "
                         "seconds; reconnecting."),
                     {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn),
                      'sec': now - conn.last_used})
            conn = self._create_connection(*self._conn_params(conn))

        conn.last_used = now
        conn.priority = priority  # stash current priority for release
        qsize = self._conn_pool.qsize()
        LOG.debug("[%(rid)d] Acquired connection %(conn)s. %(qsize)d "
                  "connection(s) available.",
                  {'rid': rid, 'conn': api_client.ctrl_conn_to_str(conn),
                   'qsize': qsize})
        if auto_login and self.auth_cookie(conn) is None:
            self._wait_for_login(conn, headers)
        return conn

    def release_connection(self, http_conn, bad_state=False,
                           service_unavail=False, rid=-1):
        '''Mark HTTPConnection instance as available for check-out.

        :param http_conn: An HTTPConnection instance obtained from this
            instance.
        :param bad_state: True if http_conn is known to be in a bad state
            (e.g. connection fault.)
        :service_unavail: True if http_conn returned 503 response.
        :param rid: request id passed in from request eventlet.
        '''
        conn_params = self._conn_params(http_conn)
        if self._conn_params(http_conn) not in self._api_providers:
            LOG.debug("[%(rid)d] Released connection %(conn)s is not an "
                      "API provider for the cluster",
                      {'rid': rid,
                       'conn': api_client.ctrl_conn_to_str(http_conn)})
            return
        elif hasattr(http_conn, "no_release"):
            # Temporary redirect connection; never returned to the pool
            # (see acquire_redirect_connection in the eventlet client).
            return

        priority = http_conn.priority
        if bad_state:
            # Reconnect to provider.
            LOG.warn(_LW("[%(rid)d] Connection returned in bad state, "
                         "reconnecting to %(conn)s"),
                     {'rid': rid,
                      'conn': api_client.ctrl_conn_to_str(http_conn)})
            http_conn = self._create_connection(*self._conn_params(http_conn))
        elif service_unavail:
            # http_conn returned a service unaviable response, put other
            # connections to the same controller at end of priority queue,
            conns = []
            while not self._conn_pool.empty():
                priority, conn = self._conn_pool.get()
                if self._conn_params(conn) == conn_params:
                    # Demote connections to the unavailable controller.
                    priority = self._next_conn_priority
                    self._next_conn_priority += 1
                conns.append((priority, conn))
            for priority, conn in conns:
                self._conn_pool.put((priority, conn))
            # put http_conn at end of queue also
            priority = self._next_conn_priority
            self._next_conn_priority += 1

        self._conn_pool.put((priority, http_conn))
        LOG.debug("[%(rid)d] Released connection %(conn)s. %(qsize)d "
                  "connection(s) available.",
                  {'rid': rid, 'conn': api_client.ctrl_conn_to_str(http_conn),
                   'qsize': self._conn_pool.qsize()})

    def _wait_for_login(self, conn, headers=None):
        '''Block until a login has occurred for the current API provider.'''

        data = self._get_provider_data(conn)
        if data is None:
            LOG.error(_LE("Login request for an invalid connection: '%s'"),
                      api_client.ctrl_conn_to_str(conn))
            return
        provider_sem = data[0]
        # Only one greenthread per provider performs the login; others wait
        # on the semaphore until it completes.
        if provider_sem.acquire(blocking=False):
            try:
                cookie = self._login(conn, headers)
                self.set_auth_cookie(conn, cookie)
            finally:
                provider_sem.release()
        else:
            LOG.debug("Waiting for auth to complete")
            # Wait until we can acquire then release
            provider_sem.acquire(blocking=True)
            provider_sem.release()

    def _get_provider_data(self, conn_or_conn_params, default=None):
        """Get data for specified API provider.

        Args:
            conn_or_conn_params: either a HTTP(S)Connection object or the
                resolved conn_params tuple returned by self._conn_params().
            default: conn_params if ones passed aren't known
        Returns: Data associated with specified provider
        """
        conn_params = self._normalize_conn_params(conn_or_conn_params)
        return self._api_provider_data.get(conn_params, default)

    def _set_provider_data(self, conn_or_conn_params, data):
        """Set data for specified API provider.

        Args:
            conn_or_conn_params: either a HTTP(S)Connection object or the
                resolved conn_params tuple returned by self._conn_params().
            data: data to associate with API provider
        """
        conn_params = self._normalize_conn_params(conn_or_conn_params)
        if data is None:
            del self._api_provider_data[conn_params]
        else:
            self._api_provider_data[conn_params] = data

    def _normalize_conn_params(self, conn_or_conn_params):
        """Normalize conn_param tuple.

        Args:
            conn_or_conn_params: either a HTTP(S)Connection object or the
                resolved conn_params tuple returned by self._conn_params().

        Returns: Normalized conn_param tuple
        """
        if (not isinstance(conn_or_conn_params, tuple) and
            not isinstance(conn_or_conn_params, httplib.HTTPConnection)):
            LOG.debug("Invalid conn_params value: '%s'",
                      str(conn_or_conn_params))
            # Unknown input is returned unchanged rather than raising.
            return conn_or_conn_params
        if isinstance(conn_or_conn_params, httplib.HTTPConnection):
            conn_params = self._conn_params(conn_or_conn_params)
        else:
            conn_params = conn_or_conn_params
        host, port, is_ssl = conn_params
        if port is None:
            # Fill in the scheme's default port so tuples compare equal.
            port = 443 if is_ssl else 80
        return (host, port, is_ssl)
|
@ -1,140 +0,0 @@
|
||||
# Copyright 2012 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
import httplib
|
||||
|
||||
from neutron.i18n import _LE
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.api_client import base
|
||||
from neutron.plugins.vmware.api_client import eventlet_client
|
||||
from neutron.plugins.vmware.api_client import eventlet_request
|
||||
from neutron.plugins.vmware.api_client import exception
|
||||
from neutron.plugins.vmware.api_client import version
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class NsxApiClient(eventlet_client.EventletApiClient):
    """The Nsx API Client."""

    def __init__(self, api_providers, user, password,
                 concurrent_connections=base.DEFAULT_CONCURRENT_CONNECTIONS,
                 gen_timeout=base.GENERATION_ID_TIMEOUT,
                 use_https=True,
                 connect_timeout=base.DEFAULT_CONNECT_TIMEOUT,
                 http_timeout=75, retries=2, redirects=2):
        '''Constructor. Adds the following:

        :param http_timeout: how long to wait before aborting an
            unresponsive controller (and allow for retries to another
            controller in the cluster)
        :param retries: the number of times a request is attempted.
        :param redirects: the number of redirects to follow per request.
        '''
        super(NsxApiClient, self).__init__(
            api_providers, user, password,
            concurrent_connections=concurrent_connections,
            gen_timeout=gen_timeout, use_https=use_https,
            connect_timeout=connect_timeout)

        # Overall time budget for a request across all of its retries.
        self._request_timeout = http_timeout * retries
        self._http_timeout = http_timeout
        self._retries = retries
        self._redirects = redirects
        # NSX version, lazily discovered from response headers.
        self._version = None

    # NOTE(salvatore-orlando): This method is not used anymore. Login is now
    # performed automatically inside the request eventlet if necessary.
    def login(self, user=None, password=None):
        '''Login to NSX controller.

        Assumes same password is used for all controllers.

        :param user: controller user (usually admin). Provided for
            backwards compatibility. In the normal mode of operation
            this should be None.
        :param password: controller password. Provided for backwards
            compatibility. In the normal mode of operation this should
            be None.
        '''
        if user:
            self._user = user
        if password:
            self._password = password

        return self._login()

    def request(self, method, url, body="", content_type="application/json"):
        '''Issues request to controller.'''

        g = eventlet_request.GenericRequestEventlet(
            self, method, url, body, content_type, auto_login=True,
            http_timeout=self._http_timeout,
            retries=self._retries, redirects=self._redirects)
        g.start()
        response = g.join()
        LOG.debug('Request returns "%s"', response)

        # response is a modified HTTPResponse object or None.
        # response.read() will not work on response as the underlying library
        # request_eventlet.ApiRequestEventlet has already called this
        # method in order to extract the body and headers for processing.
        # ApiRequestEventlet derived classes call .read() and
        # .getheaders() on the HTTPResponse objects and store the results in
        # the response object's .body and .headers data members for future
        # access.

        if response is None:
            # Timeout.
            LOG.error(_LE('Request timed out: %(method)s to %(url)s'),
                      {'method': method, 'url': url})
            raise exception.RequestTimeout()

        status = response.status
        if status == httplib.UNAUTHORIZED:
            raise exception.UnAuthorizedRequest()

        # Fail-fast: Check for exception conditions and raise the
        # appropriate exceptions for known error codes.
        if status in exception.ERROR_MAPPINGS:
            LOG.error(_LE("Received error code: %s"), status)
            LOG.error(_LE("Server Error Message: %s"), response.body)
            exception.ERROR_MAPPINGS[status](response)

        # Continue processing for non-error condition.
        if (status != httplib.OK and status != httplib.CREATED
                and status != httplib.NO_CONTENT):
            LOG.error(_LE("%(method)s to %(url)s, unexpected response code: "
                          "%(status)d (content = '%(body)s')"),
                      {'method': method, 'url': url,
                       'status': response.status, 'body': response.body})
            return None

        if not self._version:
            # Opportunistically capture the controller version from the
            # response headers of any successful request.
            self._version = version.find_version(response.headers)
        return response.body

    def get_version(self):
        """Return the NSX controller version, querying the cluster if needed."""
        if not self._version:
            # Determine the controller version by querying the
            # cluster nodes. Currently, the version will be the
            # one of the server that responds.
            self.request('GET', '/ws.v1/control-cluster/node')
            if not self._version:
                LOG.error(_LE('Unable to determine NSX version. '
                              'Plugin might not work as expected.'))
        return self._version
|
@ -1,156 +0,0 @@
|
||||
# Copyright 2012 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
import time
|
||||
|
||||
import eventlet
# Monkey patch the standard library at import time so every socket created
# by this module cooperates with eventlet green threads.
eventlet.monkey_patch()
|
||||
|
||||
from neutron.i18n import _LE
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.api_client import base
|
||||
from neutron.plugins.vmware.api_client import eventlet_request
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class EventletApiClient(base.ApiClientBase):
    """Eventlet-based implementation of NSX ApiClient ABC."""

    def __init__(self, api_providers, user, password,
                 concurrent_connections=base.DEFAULT_CONCURRENT_CONNECTIONS,
                 gen_timeout=base.GENERATION_ID_TIMEOUT,
                 use_https=True,
                 connect_timeout=base.DEFAULT_CONNECT_TIMEOUT):
        '''Constructor

        :param api_providers: a list of tuples of the form: (host, port,
            is_ssl).
        :param user: login username.
        :param password: login password.
        :param concurrent_connections: total number of concurrent connections.
        :param use_https: whether or not to use https for requests.
        :param connect_timeout: connection timeout in seconds.
        :param gen_timeout controls how long the generation id is kept
            if set to -1 the generation id is never timed out
        '''
        if not api_providers:
            api_providers = []
        self._api_providers = set([tuple(p) for p in api_providers])
        self._api_provider_data = {}  # tuple(semaphore, session_cookie)
        for p in self._api_providers:
            self._set_provider_data(p, (eventlet.semaphore.Semaphore(1), None))
        self._user = user
        self._password = password
        self._concurrent_connections = concurrent_connections
        self._use_https = use_https
        self._connect_timeout = connect_timeout
        self._config_gen = None
        self._config_gen_ts = None
        self._gen_timeout = gen_timeout

        # Connection pool is a list of queues.
        self._conn_pool = eventlet.queue.PriorityQueue()
        self._next_conn_priority = 1
        for host, port, is_ssl in api_providers:
            for _ in range(concurrent_connections):
                conn = self._create_connection(host, port, is_ssl)
                self._conn_pool.put((self._next_conn_priority, conn))
            self._next_conn_priority += 1

    def acquire_redirect_connection(self, conn_params, auto_login=True,
                                    headers=None):
        """Check out or create connection to redirected NSX API server.

        Args:
            conn_params: tuple specifying target of redirect, see
                self._conn_params()
            auto_login: returned connection should have valid session cookie
            headers: headers to pass on if auto_login

        Returns: An available HTTPConnection instance corresponding to the
                 specified conn_params. If a connection did not previously
                 exist, new connections are created with the highest priority
                 in the connection pool and one of these new connections
                 returned.
        """
        result_conn = None
        data = self._get_provider_data(conn_params)
        if data:
            # redirect target already exists in provider data and connections
            # to the provider have been added to the connection pool. Try to
            # obtain a connection from the pool, note that it's possible that
            # all connection to the provider are currently in use.
            conns = []
            while not self._conn_pool.empty():
                priority, conn = self._conn_pool.get_nowait()
                if not result_conn and self._conn_params(conn) == conn_params:
                    conn.priority = priority
                    result_conn = conn
                else:
                    conns.append((priority, conn))
            for priority, conn in conns:
                self._conn_pool.put((priority, conn))
            # hack: if no free connections available, create new connection
            # and stash "no_release" attribute (so that we only exceed
            # self._concurrent_connections temporarily)
            if not result_conn:
                conn = self._create_connection(*conn_params)
                conn.priority = 0  # redirect connections have highest priority
                conn.no_release = True
                result_conn = conn
        else:
            # redirect target not already known, setup provider lists
            self._api_providers.update([conn_params])
            self._set_provider_data(conn_params,
                                    (eventlet.semaphore.Semaphore(1), None))
            # redirects occur during cluster upgrades, i.e. results to old
            # redirects to new, so give redirect targets highest priority
            priority = 0
            for i in range(self._concurrent_connections):
                conn = self._create_connection(*conn_params)
                conn.priority = priority
                if i == self._concurrent_connections - 1:
                    break
                self._conn_pool.put((priority, conn))
            result_conn = conn
        if result_conn:
            result_conn.last_used = time.time()
            # BUGFIX: check the auth cookie of the connection actually being
            # returned.  The previous code checked 'conn', which after the
            # pool-scan loop above may be a connection to a *different*
            # provider, so a cookie for the wrong provider could suppress a
            # login that result_conn's provider still needs.
            if auto_login and self.auth_cookie(result_conn) is None:
                self._wait_for_login(result_conn, headers)
        return result_conn

    def _login(self, conn=None, headers=None):
        '''Issue login request and update authentication cookie.'''
        cookie = None
        g = eventlet_request.LoginRequestEventlet(
            self, self._user, self._password, conn, headers)
        g.start()
        ret = g.join()
        if ret:
            if isinstance(ret, Exception):
                LOG.error(_LE('Login error "%s"'), ret)
                raise ret

            cookie = ret.getheader("Set-Cookie")
            if cookie:
                LOG.debug("Saving new authentication cookie '%s'", cookie)

        return cookie


# Register as subclass.
base.ApiClientBase.register(EventletApiClient)
|
@ -1,241 +0,0 @@
|
||||
# Copyright 2012 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import httplib
|
||||
import urllib
|
||||
|
||||
import eventlet
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from neutron.i18n import _LI, _LW
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.api_client import request
|
||||
|
||||
LOG = logging.getLogger(__name__)
# User-Agent header value attached to every request issued by this module.
USER_AGENT = "Neutron eventlet client/2.0"
|
||||
|
||||
|
||||
class EventletApiRequest(request.ApiRequest):
    '''Eventlet-based ApiRequest class.

    This class will form the basis for eventlet-based ApiRequest classes
    '''

    # Maximum number of green threads present in the system at one time.
    API_REQUEST_POOL_SIZE = request.DEFAULT_API_REQUEST_POOL_SIZE

    # Pool of green threads. One green thread is allocated per incoming
    # request. Incoming requests will block when the pool is empty.
    API_REQUEST_POOL = eventlet.GreenPool(API_REQUEST_POOL_SIZE)

    # A unique id is assigned to each incoming request. When the current
    # request id reaches MAXIMUM_REQUEST_ID it wraps around back to 0.
    MAXIMUM_REQUEST_ID = request.DEFAULT_MAXIMUM_REQUEST_ID

    # The request id for the next incoming request.
    CURRENT_REQUEST_ID = 0

    def __init__(self, client_obj, url, method="GET", body=None,
                 headers=None,
                 retries=request.DEFAULT_RETRIES,
                 auto_login=True,
                 redirects=request.DEFAULT_REDIRECTS,
                 http_timeout=request.DEFAULT_HTTP_TIMEOUT, client_conn=None):
        '''Constructor.'''
        self._api_client = client_obj
        self._url = url
        self._method = method
        self._body = body
        self._headers = headers or {}
        # Total time budget across all retries of this request.
        self._request_timeout = http_timeout * retries
        self._retries = retries
        self._auto_login = auto_login
        self._redirects = redirects
        self._http_timeout = http_timeout
        self._client_conn = client_conn
        self._abort = False

        self._request_error = None

        if "User-Agent" not in self._headers:
            self._headers["User-Agent"] = USER_AGENT

        self._green_thread = None
        # Retrieve and store this instance's unique request id.
        self._request_id = EventletApiRequest.CURRENT_REQUEST_ID
        # Update the class variable that tracks request id.
        # Request IDs wrap around at MAXIMUM_REQUEST_ID
        next_request_id = self._request_id + 1
        next_request_id %= self.MAXIMUM_REQUEST_ID
        EventletApiRequest.CURRENT_REQUEST_ID = next_request_id

    @classmethod
    def _spawn(cls, func, *args, **kwargs):
        '''Allocate a green thread from the class pool.'''
        return cls.API_REQUEST_POOL.spawn(func, *args, **kwargs)

    def spawn(self, func, *args, **kwargs):
        '''Spawn a new green thread with the supplied function and args.'''
        return self.__class__._spawn(func, *args, **kwargs)

    @classmethod
    def joinall(cls):
        '''Wait for all outstanding requests to complete.'''
        return cls.API_REQUEST_POOL.waitall()

    def join(self):
        '''Wait for instance green thread to complete.'''
        if self._green_thread is not None:
            return self._green_thread.wait()
        # NOTE: the Exception is returned, not raised; callers check the
        # result type (see NsxApiClient.request).
        return Exception(_('Joining an invalid green thread'))

    def start(self):
        '''Start request processing.'''
        self._green_thread = self.spawn(self._run)

    def copy(self):
        '''Return a copy of this request instance.'''
        return EventletApiRequest(
            self._api_client, self._url, self._method, self._body,
            self._headers, self._retries,
            self._auto_login, self._redirects, self._http_timeout)

    def _run(self):
        '''Method executed within green thread.'''
        if self._request_timeout:
            # No timeout exception escapes the with block.
            # Timeout(..., False) makes the block silently fall through on
            # expiry, so reaching the lines below means the request timed out.
            with eventlet.timeout.Timeout(self._request_timeout, False):
                return self._handle_request()

            LOG.info(_LI('[%d] Request timeout.'), self._rid())
            self._request_error = Exception(_('Request timeout'))
            return None
        else:
            return self._handle_request()

    def _handle_request(self):
        '''First level request handling.'''
        attempt = 0
        timeout = 0
        response = None
        while response is None and attempt <= self._retries:
            eventlet.greenthread.sleep(timeout)
            attempt += 1

            # _issue_request is provided by the request.ApiRequest base —
            # presumably returns an HTTPResponse or an error object.
            req = self._issue_request()
            # automatically raises any exceptions returned.
            if isinstance(req, httplib.HTTPResponse):
                timeout = 0
                if attempt <= self._retries and not self._abort:
                    if req.status in (httplib.UNAUTHORIZED, httplib.FORBIDDEN):
                        # Retry immediately; auto-login may refresh the cookie.
                        continue
                    elif req.status == httplib.SERVICE_UNAVAILABLE:
                        # Back off briefly before retrying a busy controller.
                        timeout = 0.5
                        continue
                    # else fall through to return the error code

                LOG.debug("[%(rid)d] Completed request '%(method)s %(url)s'"
                          ": %(status)s",
                          {'rid': self._rid(), 'method': self._method,
                           'url': self._url, 'status': req.status})
                self._request_error = None
                response = req
            else:
                LOG.info(_LI('[%(rid)d] Error while handling request: '
                             '%(req)s'),
                         {'rid': self._rid(), 'req': req})
                self._request_error = req
                response = None
        return response
|
||||
|
||||
|
||||
class LoginRequestEventlet(EventletApiRequest):
    '''Process a login request.'''

    def __init__(self, client_obj, user, password, client_conn=None,
                 headers=None):
        # Login is form-encoded; force the content type on top of any
        # caller-supplied headers (the caller's dict is updated in place).
        login_headers = {} if headers is None else headers
        login_headers.update(
            {"Content-Type": "application/x-www-form-urlencoded"})
        credentials = urllib.urlencode(
            {"username": user, "password": password})
        super(LoginRequestEventlet, self).__init__(
            client_obj, "/ws.v1/login", "POST", credentials, login_headers,
            auto_login=False, client_conn=client_conn)

    def session_cookie(self):
        '''Return the session cookie from a successful login, else None.'''
        cookie = None
        if self.successful():
            cookie = self.value.getheader("Set-Cookie")
        return cookie
|
||||
|
||||
|
||||
class GetApiProvidersRequestEventlet(EventletApiRequest):
    '''Get a list of API providers.'''

    def __init__(self, client_obj):
        super(GetApiProvidersRequestEventlet, self).__init__(
            client_obj, "/ws.v1/control-cluster/node?fields=roles",
            "GET", auto_login=True)

    def api_providers(self):
        """Parse api_providers from response.

        Returns: api_providers in [(host, port, is_ssl), ...] format
        """
        def _provider_from_listen_addr(addr):
            # (pssl|ptcp):<ip>:<port> => (host, port, is_ssl)
            fields = addr.split(':')
            return (fields[1], int(fields[2]), fields[0] == 'pssl')

        try:
            if self.successful():
                providers = []
                payload = jsonutils.loads(self.value.body)
                for node in payload.get('results', []):
                    for role in node.get('roles', []):
                        if role.get('role') != 'api_provider':
                            continue
                        listen_addr = role.get('listen_addr')
                        if listen_addr:
                            providers.append(
                                _provider_from_listen_addr(listen_addr))
                return providers
        except Exception as e:
            LOG.warn(_LW("[%(rid)d] Failed to parse API provider: %(e)s"),
                     {'rid': self._rid(), 'e': e})
            # intentionally fall through
        return None
|
||||
|
||||
|
||||
class GenericRequestEventlet(EventletApiRequest):
    '''Handle a generic request.'''

    def __init__(self, client_obj, method, url, body, content_type,
                 auto_login=False,
                 http_timeout=request.DEFAULT_HTTP_TIMEOUT,
                 retries=request.DEFAULT_RETRIES,
                 redirects=request.DEFAULT_REDIRECTS):
        request_headers = {"Content-Type": content_type}
        super(GenericRequestEventlet, self).__init__(
            client_obj, url, method, body, request_headers,
            retries=retries,
            auto_login=auto_login, redirects=redirects,
            http_timeout=http_timeout)

    def session_cookie(self):
        '''Return the session cookie if the request succeeded, else None.'''
        if not self.successful():
            return None
        return self.value.getheader("Set-Cookie")


# Register the concrete implementation with the abstract ApiRequest base.
request.ApiRequest.register(EventletApiRequest)
|
@ -1,121 +0,0 @@
|
||||
# Copyright 2014 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
|
||||
class NsxApiException(Exception):
|
||||
"""Base NSX API Client Exception.
|
||||
|
||||
To correctly use this class, inherit from it and define
|
||||
a 'message' property. That message will get printf'd
|
||||
with the keyword arguments provided to the constructor.
|
||||
|
||||
"""
|
||||
message = _("An unknown exception occurred.")
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
try:
|
||||
self._error_string = self.message % kwargs
|
||||
except Exception:
|
||||
# at least get the core message out if something happened
|
||||
self._error_string = self.message
|
||||
|
||||
def __str__(self):
|
||||
return self._error_string
|
||||
|
||||
|
||||
class UnAuthorizedRequest(NsxApiException):
|
||||
message = _("Server denied session's authentication credentials.")
|
||||
|
||||
|
||||
class ResourceNotFound(NsxApiException):
|
||||
message = _("An entity referenced in the request was not found.")
|
||||
|
||||
|
||||
class Conflict(NsxApiException):
|
||||
message = _("Request conflicts with configuration on a different "
|
||||
"entity.")
|
||||
|
||||
|
||||
class ServiceUnavailable(NsxApiException):
|
||||
message = _("Request could not completed because the associated "
|
||||
"resource could not be reached.")
|
||||
|
||||
|
||||
class Forbidden(NsxApiException):
|
||||
message = _("The request is forbidden from accessing the "
|
||||
"referenced resource.")
|
||||
|
||||
|
||||
class ReadOnlyMode(Forbidden):
|
||||
message = _("Create/Update actions are forbidden when in read-only mode.")
|
||||
|
||||
|
||||
class RequestTimeout(NsxApiException):
|
||||
message = _("The request has timed out.")
|
||||
|
||||
|
||||
class BadRequest(NsxApiException):
|
||||
message = _("The server is unable to fulfill the request due "
|
||||
"to a bad syntax")
|
||||
|
||||
|
||||
class InvalidSecurityCertificate(BadRequest):
|
||||
message = _("The backend received an invalid security certificate.")
|
||||
|
||||
|
||||
def fourZeroZero(response=None):
|
||||
if response and "Invalid SecurityCertificate" in response.body:
|
||||
raise InvalidSecurityCertificate()
|
||||
raise BadRequest()
|
||||
|
||||
|
||||
def fourZeroFour(response=None):
|
||||
raise ResourceNotFound()
|
||||
|
||||
|
||||
def fourZeroNine(response=None):
|
||||
raise Conflict()
|
||||
|
||||
|
||||
def fiveZeroThree(response=None):
|
||||
raise ServiceUnavailable()
|
||||
|
||||
|
||||
def fourZeroThree(response=None):
|
||||
if 'read-only' in response.body:
|
||||
raise ReadOnlyMode()
|
||||
else:
|
||||
raise Forbidden()
|
||||
|
||||
|
||||
def zero(self, response=None):
|
||||
raise NsxApiException()
|
||||
|
||||
|
||||
ERROR_MAPPINGS = {
|
||||
400: fourZeroZero,
|
||||
404: fourZeroFour,
|
||||
405: zero,
|
||||
409: fourZeroNine,
|
||||
503: fiveZeroThree,
|
||||
403: fourZeroThree,
|
||||
301: zero,
|
||||
307: zero,
|
||||
500: zero,
|
||||
501: zero,
|
||||
503: zero
|
||||
}
|
@ -1,288 +0,0 @@
|
||||
# Copyright 2012 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
import abc
|
||||
import copy
|
||||
import httplib
|
||||
import time
|
||||
|
||||
import eventlet
|
||||
from oslo_utils import excutils
|
||||
import six
|
||||
import six.moves.urllib.parse as urlparse
|
||||
|
||||
from neutron.i18n import _LI, _LW
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware import api_client
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
DEFAULT_HTTP_TIMEOUT = 30
|
||||
DEFAULT_RETRIES = 2
|
||||
DEFAULT_REDIRECTS = 2
|
||||
DEFAULT_API_REQUEST_POOL_SIZE = 1000
|
||||
DEFAULT_MAXIMUM_REQUEST_ID = 4294967295
|
||||
DOWNLOAD_TIMEOUT = 180
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class ApiRequest(object):
|
||||
'''An abstract baseclass for all ApiRequest implementations.
|
||||
|
||||
This defines the interface and property structure for both eventlet and
|
||||
gevent-based ApiRequest classes.
|
||||
'''
|
||||
|
||||
# List of allowed status codes.
|
||||
ALLOWED_STATUS_CODES = [
|
||||
httplib.OK,
|
||||
httplib.CREATED,
|
||||
httplib.NO_CONTENT,
|
||||
httplib.MOVED_PERMANENTLY,
|
||||
httplib.TEMPORARY_REDIRECT,
|
||||
httplib.BAD_REQUEST,
|
||||
httplib.UNAUTHORIZED,
|
||||
httplib.FORBIDDEN,
|
||||
httplib.NOT_FOUND,
|
||||
httplib.CONFLICT,
|
||||
httplib.INTERNAL_SERVER_ERROR,
|
||||
httplib.SERVICE_UNAVAILABLE
|
||||
]
|
||||
|
||||
@abc.abstractmethod
|
||||
def start(self):
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def join(self):
|
||||
pass
|
||||
|
||||
@abc.abstractmethod
|
||||
def copy(self):
|
||||
pass
|
||||
|
||||
def _issue_request(self):
|
||||
'''Issue a request to a provider.'''
|
||||
conn = (self._client_conn or
|
||||
self._api_client.acquire_connection(True,
|
||||
copy.copy(self._headers),
|
||||
rid=self._rid()))
|
||||
if conn is None:
|
||||
error = Exception(_("No API connections available"))
|
||||
self._request_error = error
|
||||
return error
|
||||
|
||||
url = self._url
|
||||
LOG.debug("[%(rid)d] Issuing - request url: %(conn)s "
|
||||
"body: %(body)s",
|
||||
{'rid': self._rid(), 'conn': self._request_str(conn, url),
|
||||
'body': self._body})
|
||||
issued_time = time.time()
|
||||
is_conn_error = False
|
||||
is_conn_service_unavail = False
|
||||
response = None
|
||||
try:
|
||||
redirects = 0
|
||||
while (redirects <= self._redirects):
|
||||
# Update connection with user specified request timeout,
|
||||
# the connect timeout is usually smaller so we only set
|
||||
# the request timeout after a connection is established
|
||||
if conn.sock is None:
|
||||
conn.connect()
|
||||
conn.sock.settimeout(self._http_timeout)
|
||||
elif conn.sock.gettimeout() != self._http_timeout:
|
||||
conn.sock.settimeout(self._http_timeout)
|
||||
|
||||
headers = copy.copy(self._headers)
|
||||
cookie = self._api_client.auth_cookie(conn)
|
||||
if cookie:
|
||||
headers["Cookie"] = cookie
|
||||
|
||||
gen = self._api_client.config_gen
|
||||
if gen:
|
||||
headers["X-Nvp-Wait-For-Config-Generation"] = gen
|
||||
LOG.debug("Setting X-Nvp-Wait-For-Config-Generation "
|
||||
"request header: '%s'", gen)
|
||||
try:
|
||||
conn.request(self._method, url, self._body, headers)
|
||||
except Exception as e:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.warn(_LW("[%(rid)d] Exception issuing request: "
|
||||
"%(e)s"),
|
||||
{'rid': self._rid(), 'e': e})
|
||||
|
||||
response = conn.getresponse()
|
||||
response.body = response.read()
|
||||
response.headers = response.getheaders()
|
||||
elapsed_time = time.time() - issued_time
|
||||
LOG.debug("[%(rid)d] Completed request '%(conn)s': "
|
||||
"%(status)s (%(elapsed)s seconds)",
|
||||
{'rid': self._rid(),
|
||||
'conn': self._request_str(conn, url),
|
||||
'status': response.status,
|
||||
'elapsed': elapsed_time})
|
||||
|
||||
new_gen = response.getheader('X-Nvp-Config-Generation', None)
|
||||
if new_gen:
|
||||
LOG.debug("Reading X-Nvp-config-Generation response "
|
||||
"header: '%s'", new_gen)
|
||||
if (self._api_client.config_gen is None or
|
||||
self._api_client.config_gen < int(new_gen)):
|
||||
self._api_client.config_gen = int(new_gen)
|
||||
|
||||
if response.status == httplib.UNAUTHORIZED:
|
||||
|
||||
if cookie is None and self._url != "/ws.v1/login":
|
||||
# The connection still has no valid cookie despite
|
||||
# attempts to authenticate and the request has failed
|
||||
# with unauthorized status code. If this isn't a
|
||||
# a request to authenticate, we should abort the
|
||||
# request since there is no point in retrying.
|
||||
self._abort = True
|
||||
|
||||
# If request is unauthorized, clear the session cookie
|
||||
# for the current provider so that subsequent requests
|
||||
# to the same provider triggers re-authentication.
|
||||
self._api_client.set_auth_cookie(conn, None)
|
||||
elif response.status == httplib.SERVICE_UNAVAILABLE:
|
||||
is_conn_service_unavail = True
|
||||
|
||||
if response.status not in [httplib.MOVED_PERMANENTLY,
|
||||
httplib.TEMPORARY_REDIRECT]:
|
||||
break
|
||||
elif redirects >= self._redirects:
|
||||
LOG.info(_LI("[%d] Maximum redirects exceeded, aborting "
|
||||
"request"), self._rid())
|
||||
break
|
||||
redirects += 1
|
||||
|
||||
conn, url = self._redirect_params(conn, response.headers,
|
||||
self._client_conn is None)
|
||||
if url is None:
|
||||
response.status = httplib.INTERNAL_SERVER_ERROR
|
||||
break
|
||||
LOG.info(_LI("[%(rid)d] Redirecting request to: %(conn)s"),
|
||||
{'rid': self._rid(),
|
||||
'conn': self._request_str(conn, url)})
|
||||
# yield here, just in case we are not out of the loop yet
|
||||
eventlet.greenthread.sleep(0)
|
||||
# If we receive any of these responses, then
|
||||
# our server did not process our request and may be in an
|
||||
# errored state. Raise an exception, which will cause the
|
||||
# the conn to be released with is_conn_error == True
|
||||
# which puts the conn on the back of the client's priority
|
||||
# queue.
|
||||
if (response.status == httplib.INTERNAL_SERVER_ERROR and
|
||||
response.status > httplib.NOT_IMPLEMENTED):
|
||||
LOG.warn(_LW("[%(rid)d] Request '%(method)s %(url)s' "
|
||||
"received: %(status)s"),
|
||||
{'rid': self._rid(), 'method': self._method,
|
||||
'url': self._url, 'status': response.status})
|
||||
raise Exception(_('Server error return: %s'), response.status)
|
||||
return response
|
||||
except Exception as e:
|
||||
if isinstance(e, httplib.BadStatusLine):
|
||||
msg = (_("Invalid server response"))
|
||||
else:
|
||||
msg = unicode(e)
|
||||
if response is None:
|
||||
elapsed_time = time.time() - issued_time
|
||||
LOG.warn(_LW("[%(rid)d] Failed request '%(conn)s': '%(msg)s' "
|
||||
"(%(elapsed)s seconds)"),
|
||||
{'rid': self._rid(), 'conn': self._request_str(conn, url),
|
||||
'msg': msg, 'elapsed': elapsed_time})
|
||||
self._request_error = e
|
||||
is_conn_error = True
|
||||
return e
|
||||
finally:
|
||||
# Make sure we release the original connection provided by the
|
||||
# acquire_connection() call above.
|
||||
if self._client_conn is None:
|
||||
self._api_client.release_connection(conn, is_conn_error,
|
||||
is_conn_service_unavail,
|
||||
rid=self._rid())
|
||||
|
||||
def _redirect_params(self, conn, headers, allow_release_conn=False):
|
||||
"""Process redirect response, create new connection if necessary.
|
||||
|
||||
Args:
|
||||
conn: connection that returned the redirect response
|
||||
headers: response headers of the redirect response
|
||||
allow_release_conn: if redirecting to a different server,
|
||||
release existing connection back to connection pool.
|
||||
|
||||
Returns: Return tuple(conn, url) where conn is a connection object
|
||||
to the redirect target and url is the path of the API request
|
||||
"""
|
||||
|
||||
url = None
|
||||
for name, value in headers:
|
||||
if name.lower() == "location":
|
||||
url = value
|
||||
break
|
||||
if not url:
|
||||
LOG.warn(_LW("[%d] Received redirect status without location "
|
||||
"header field"), self._rid())
|
||||
return (conn, None)
|
||||
# Accept location with the following format:
|
||||
# 1. /path, redirect to same node
|
||||
# 2. scheme://hostname:[port]/path where scheme is https or http
|
||||
# Reject others
|
||||
# 3. e.g. relative paths, unsupported scheme, unspecified host
|
||||
result = urlparse.urlparse(url)
|
||||
if not result.scheme and not result.hostname and result.path:
|
||||
if result.path[0] == "/":
|
||||
if result.query:
|
||||
url = "%s?%s" % (result.path, result.query)
|
||||
else:
|
||||
url = result.path
|
||||
return (conn, url) # case 1
|
||||
else:
|
||||
LOG.warn(_LW("[%(rid)d] Received invalid redirect location: "
|
||||
"'%(url)s'"), {'rid': self._rid(), 'url': url})
|
||||
return (conn, None) # case 3
|
||||
elif result.scheme not in ["http", "https"] or not result.hostname:
|
||||
LOG.warn(_LW("[%(rid)d] Received malformed redirect "
|
||||
"location: %(url)s"),
|
||||
{'rid': self._rid(), 'url': url})
|
||||
return (conn, None) # case 3
|
||||
# case 2, redirect location includes a scheme
|
||||
# so setup a new connection and authenticate
|
||||
if allow_release_conn:
|
||||
self._api_client.release_connection(conn)
|
||||
conn_params = (result.hostname, result.port, result.scheme == "https")
|
||||
conn = self._api_client.acquire_redirect_connection(conn_params, True,
|
||||
self._headers)
|
||||
if result.query:
|
||||
url = "%s?%s" % (result.path, result.query)
|
||||
else:
|
||||
url = result.path
|
||||
return (conn, url)
|
||||
|
||||
def _rid(self):
|
||||
'''Return current request id.'''
|
||||
return self._request_id
|
||||
|
||||
@property
|
||||
def request_error(self):
|
||||
'''Return any errors associated with this instance.'''
|
||||
return self._request_error
|
||||
|
||||
def _request_str(self, conn, url):
|
||||
'''Return string representation of connection.'''
|
||||
return "%s %s/%s" % (self._method, api_client.ctrl_conn_to_str(conn),
|
||||
url)
|
@ -1,44 +0,0 @@
|
||||
# Copyright 2012 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from neutron.i18n import _LW
|
||||
from neutron.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def find_version(headers):
|
||||
"""Retrieve NSX controller version from response headers."""
|
||||
for (header_name, header_value) in (headers or ()):
|
||||
try:
|
||||
if header_name == 'server':
|
||||
return Version(header_value.split('/')[1])
|
||||
except IndexError:
|
||||
LOG.warning(_LW("Unable to fetch NSX version from response "
|
||||
"headers :%s"), headers)
|
||||
|
||||
|
||||
class Version(object):
|
||||
"""Abstracts NSX version by exposing major and minor."""
|
||||
|
||||
def __init__(self, version):
|
||||
self.full_version = version.split('.')
|
||||
self.major = int(self.full_version[0])
|
||||
self.minor = int(self.full_version[1])
|
||||
|
||||
def __str__(self):
|
||||
return '.'.join(self.full_version)
|
@ -1,160 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import sys
|
||||
|
||||
from oslo_config import cfg
|
||||
|
||||
from neutron.common import config
|
||||
from neutron.plugins.vmware.common import config as nsx_config # noqa
|
||||
from neutron.plugins.vmware.common import nsx_utils
|
||||
from neutron.plugins.vmware import nsxlib
|
||||
|
||||
config.setup_logging()
|
||||
|
||||
|
||||
def help(name):
|
||||
print("Usage: %s path/to/neutron/plugin/ini/config/file" % name)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def get_nsx_controllers(cluster):
|
||||
return cluster.nsx_controllers
|
||||
|
||||
|
||||
def config_helper(config_entity, cluster):
|
||||
try:
|
||||
return nsxlib.do_request('GET',
|
||||
"/ws.v1/%s?fields=uuid" % config_entity,
|
||||
cluster=cluster).get('results', [])
|
||||
except Exception as e:
|
||||
msg = (_("Error '%(err)s' when connecting to controller(s): %(ctl)s.")
|
||||
% {'err': e,
|
||||
'ctl': ', '.join(get_nsx_controllers(cluster))})
|
||||
raise Exception(msg)
|
||||
|
||||
|
||||
def get_control_cluster_nodes(cluster):
|
||||
return config_helper("control-cluster/node", cluster)
|
||||
|
||||
|
||||
def get_gateway_services(cluster):
|
||||
ret_gw_services = {"L2GatewayServiceConfig": [],
|
||||
"L3GatewayServiceConfig": []}
|
||||
gw_services = config_helper("gateway-service", cluster)
|
||||
for gw_service in gw_services:
|
||||
ret_gw_services[gw_service['type']].append(gw_service['uuid'])
|
||||
return ret_gw_services
|
||||
|
||||
|
||||
def get_transport_zones(cluster):
|
||||
transport_zones = config_helper("transport-zone", cluster)
|
||||
return [transport_zone['uuid'] for transport_zone in transport_zones]
|
||||
|
||||
|
||||
def get_transport_nodes(cluster):
|
||||
transport_nodes = config_helper("transport-node", cluster)
|
||||
return [transport_node['uuid'] for transport_node in transport_nodes]
|
||||
|
||||
|
||||
def is_transport_node_connected(cluster, node_uuid):
|
||||
try:
|
||||
return nsxlib.do_request('GET',
|
||||
"/ws.v1/transport-node/%s/status" % node_uuid,
|
||||
cluster=cluster)['connection']['connected']
|
||||
except Exception as e:
|
||||
msg = (_("Error '%(err)s' when connecting to controller(s): %(ctl)s.")
|
||||
% {'err': e,
|
||||
'ctl': ', '.join(get_nsx_controllers(cluster))})
|
||||
raise Exception(msg)
|
||||
|
||||
|
||||
def main():
|
||||
if len(sys.argv) != 2:
|
||||
help(sys.argv[0])
|
||||
args = ['--config-file']
|
||||
args.append(sys.argv[1])
|
||||
config.init(args)
|
||||
print("----------------------- Database Options -----------------------")
|
||||
print("\tconnection: %s" % cfg.CONF.database.connection)
|
||||
print("\tretry_interval: %d" % cfg.CONF.database.retry_interval)
|
||||
print("\tmax_retries: %d" % cfg.CONF.database.max_retries)
|
||||
print("----------------------- NSX Options -----------------------")
|
||||
print("\tNSX Generation Timeout %d" % cfg.CONF.NSX.nsx_gen_timeout)
|
||||
print("\tNumber of concurrent connections to each controller %d" %
|
||||
cfg.CONF.NSX.concurrent_connections)
|
||||
print("\tmax_lp_per_bridged_ls: %s" % cfg.CONF.NSX.max_lp_per_bridged_ls)
|
||||
print("\tmax_lp_per_overlay_ls: %s" % cfg.CONF.NSX.max_lp_per_overlay_ls)
|
||||
print("----------------------- Cluster Options -----------------------")
|
||||
print("\tretries: %s" % cfg.CONF.retries)
|
||||
print("\tredirects: %s" % cfg.CONF.redirects)
|
||||
print("\thttp_timeout: %s" % cfg.CONF.http_timeout)
|
||||
cluster = nsx_utils.create_nsx_cluster(
|
||||
cfg.CONF,
|
||||
cfg.CONF.NSX.concurrent_connections,
|
||||
cfg.CONF.NSX.nsx_gen_timeout)
|
||||
nsx_controllers = get_nsx_controllers(cluster)
|
||||
num_controllers = len(nsx_controllers)
|
||||
print("Number of controllers found: %s" % num_controllers)
|
||||
if num_controllers == 0:
|
||||
print("You must specify at least one controller!")
|
||||
sys.exit(1)
|
||||
|
||||
get_control_cluster_nodes(cluster)
|
||||
for controller in nsx_controllers:
|
||||
print("\tController endpoint: %s" % controller)
|
||||
gateway_services = get_gateway_services(cluster)
|
||||
default_gateways = {
|
||||
"L2GatewayServiceConfig": cfg.CONF.default_l2_gw_service_uuid,
|
||||
"L3GatewayServiceConfig": cfg.CONF.default_l3_gw_service_uuid}
|
||||
errors = 0
|
||||
for svc_type in default_gateways.keys():
|
||||
for uuid in gateway_services[svc_type]:
|
||||
print("\t\tGateway(%s) uuid: %s" % (svc_type, uuid))
|
||||
if (default_gateways[svc_type] and
|
||||
default_gateways[svc_type] not in gateway_services[svc_type]):
|
||||
print("\t\t\tError: specified default %s gateway (%s) is "
|
||||
"missing from NSX Gateway Services!" % (
|
||||
svc_type,
|
||||
default_gateways[svc_type]))
|
||||
errors += 1
|
||||
transport_zones = get_transport_zones(cluster)
|
||||
print("\tTransport zones: %s" % transport_zones)
|
||||
if cfg.CONF.default_tz_uuid not in transport_zones:
|
||||
print("\t\tError: specified default transport zone "
|
||||
"(%s) is missing from NSX transport zones!"
|
||||
% cfg.CONF.default_tz_uuid)
|
||||
errors += 1
|
||||
transport_nodes = get_transport_nodes(cluster)
|
||||
print("\tTransport nodes: %s" % transport_nodes)
|
||||
node_errors = []
|
||||
for node in transport_nodes:
|
||||
if not is_transport_node_connected(cluster, node):
|
||||
node_errors.append(node)
|
||||
|
||||
# Use different exit codes, so that we can distinguish
|
||||
# between config and runtime errors
|
||||
if len(node_errors):
|
||||
print("\nThere are one or mode transport nodes that are "
|
||||
"not connected: %s. Please, revise!" % node_errors)
|
||||
sys.exit(10)
|
||||
elif errors:
|
||||
print("\nThere are %d errors with your configuration. "
|
||||
"Please, revise!" % errors)
|
||||
sys.exit(12)
|
||||
else:
|
||||
print("Done.")
|
@ -1,199 +0,0 @@
|
||||
# Copyright 2012 VMware, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
|
||||
from neutron.plugins.vmware.common import exceptions as nsx_exc
|
||||
|
||||
|
||||
class AgentModes(object):
|
||||
AGENT = 'agent'
|
||||
AGENTLESS = 'agentless'
|
||||
COMBINED = 'combined'
|
||||
|
||||
|
||||
class MetadataModes(object):
|
||||
DIRECT = 'access_network'
|
||||
INDIRECT = 'dhcp_host_route'
|
||||
|
||||
|
||||
class ReplicationModes(object):
|
||||
SERVICE = 'service'
|
||||
SOURCE = 'source'
|
||||
|
||||
|
||||
base_opts = [
|
||||
cfg.IntOpt('max_lp_per_bridged_ls', default=5000,
|
||||
deprecated_group='NVP',
|
||||
help=_("Maximum number of ports of a logical switch on a "
|
||||
"bridged transport zone (default 5000)")),
|
||||
cfg.IntOpt('max_lp_per_overlay_ls', default=256,
|
||||
deprecated_group='NVP',
|
||||
help=_("Maximum number of ports of a logical switch on an "
|
||||
"overlay transport zone (default 256)")),
|
||||
cfg.IntOpt('concurrent_connections', default=10,
|
||||
deprecated_group='NVP',
|
||||
help=_("Maximum concurrent connections to each NSX "
|
||||
"controller.")),
|
||||
cfg.IntOpt('nsx_gen_timeout', default=-1,
|
||||
deprecated_name='nvp_gen_timeout',
|
||||
deprecated_group='NVP',
|
||||
help=_("Number of seconds a generation id should be valid for "
|
||||
"(default -1 meaning do not time out)")),
|
||||
cfg.StrOpt('metadata_mode', default=MetadataModes.DIRECT,
|
||||
deprecated_group='NVP',
|
||||
help=_("If set to access_network this enables a dedicated "
|
||||
"connection to the metadata proxy for metadata server "
|
||||
"access via Neutron router. If set to dhcp_host_route "
|
||||
"this enables host route injection via the dhcp agent. "
|
||||
"This option is only useful if running on a host that "
|
||||
"does not support namespaces otherwise access_network "
|
||||
"should be used.")),
|
||||
cfg.StrOpt('default_transport_type', default='stt',
|
||||
choices=('stt', 'gre', 'bridge', 'ipsec_gre', 'ipsec_stt'),
|
||||
deprecated_group='NVP',
|
||||
help=_("The default network transport type to use")),
|
||||
cfg.StrOpt('agent_mode', default=AgentModes.AGENT,
|
||||
deprecated_group='NVP',
|
||||
help=_("The mode used to implement DHCP/metadata services.")),
|
||||
cfg.StrOpt('replication_mode', default=ReplicationModes.SERVICE,
|
||||
help=_("The default option leverages service nodes to perform"
|
||||
" packet replication though one could set to this to "
|
||||
"'source' to perform replication locally. This is useful"
|
||||
" if one does not want to deploy a service node(s). "
|
||||
"It must be set to 'service' for leveraging distributed "
|
||||
"routers."))
|
||||
]
|
||||
|
||||
sync_opts = [
|
||||
cfg.IntOpt('state_sync_interval', default=10,
|
||||
deprecated_group='NVP_SYNC',
|
||||
help=_("Interval in seconds between runs of the state "
|
||||
"synchronization task. Set it to 0 to disable it")),
|
||||
cfg.IntOpt('max_random_sync_delay', default=0,
|
||||
deprecated_group='NVP_SYNC',
|
||||
help=_("Maximum value for the additional random "
|
||||
"delay in seconds between runs of the state "
|
||||
"synchronization task")),
|
||||
cfg.IntOpt('min_sync_req_delay', default=1,
|
||||
deprecated_group='NVP_SYNC',
|
||||
help=_('Minimum delay, in seconds, between two state '
|
||||
'synchronization queries to NSX. It must not '
|
||||
'exceed state_sync_interval')),
|
||||
cfg.IntOpt('min_chunk_size', default=500,
|
||||
deprecated_group='NVP_SYNC',
|
||||
help=_('Minimum number of resources to be retrieved from NSX '
|
||||
'during state synchronization')),
|
||||
cfg.BoolOpt('always_read_status', default=False,
|
||||
deprecated_group='NVP_SYNC',
|
||||
help=_('Always read operational status from backend on show '
|
||||
'operations. Enabling this option might slow down '
|
||||
'the system.'))
|
||||
]
|
||||
|
||||
connection_opts = [
|
||||
cfg.StrOpt('nsx_user',
|
||||
default='admin',
|
||||
deprecated_name='nvp_user',
|
||||
help=_('User name for NSX controllers in this cluster')),
|
||||
cfg.StrOpt('nsx_password',
|
||||
default='admin',
|
||||
deprecated_name='nvp_password',
|
||||
secret=True,
|
||||
help=_('Password for NSX controllers in this cluster')),
|
||||
cfg.IntOpt('http_timeout',
|
||||
default=75,
|
||||
help=_('Time before aborting a request')),
|
||||
cfg.IntOpt('retries',
|
||||
default=2,
|
||||
help=_('Number of time a request should be retried')),
|
||||
cfg.IntOpt('redirects',
|
||||
default=2,
|
||||
help=_('Number of times a redirect should be followed')),
|
||||
cfg.ListOpt('nsx_controllers',
|
||||
deprecated_name='nvp_controllers',
|
||||
help=_("Lists the NSX controllers in this cluster")),
|
||||
cfg.IntOpt('conn_idle_timeout',
|
||||
default=900,
|
||||
help=_('Reconnect connection to nsx if not used within this '
|
||||
'amount of time.')),
|
||||
]
|
||||
|
||||
cluster_opts = [
|
||||
cfg.StrOpt('default_tz_uuid',
|
||||
help=_("This is uuid of the default NSX Transport zone that "
|
||||
"will be used for creating tunneled isolated "
|
||||
"\"Neutron\" networks. It needs to be created in NSX "
|
||||
"before starting Neutron with the nsx plugin.")),
|
||||
cfg.StrOpt('default_l3_gw_service_uuid',
|
||||
help=_("Unique identifier of the NSX L3 Gateway service "
|
||||
"which will be used for implementing routers and "
|
||||
"floating IPs")),
|
||||
cfg.StrOpt('default_l2_gw_service_uuid',
|
||||
help=_("Unique identifier of the NSX L2 Gateway service "
|
||||
"which will be used by default for network gateways")),
|
||||
cfg.StrOpt('default_service_cluster_uuid',
|
||||
help=_("Unique identifier of the Service Cluster which will "
|
||||
"be used by logical services like dhcp and metadata")),
|
||||
cfg.StrOpt('default_interface_name', default='breth0',
|
||||
help=_("Name of the interface on a L2 Gateway transport node"
|
||||
"which should be used by default when setting up a "
|
||||
"network connection")),
|
||||
]
|
||||
|
||||
DEFAULT_STATUS_CHECK_INTERVAL = 2000
|
||||
|
||||
vcns_opts = [
|
||||
cfg.StrOpt('user',
|
||||
default='admin',
|
||||
help=_('User name for vsm')),
|
||||
cfg.StrOpt('password',
|
||||
default='default',
|
||||
secret=True,
|
||||
help=_('Password for vsm')),
|
||||
cfg.StrOpt('manager_uri',
|
||||
help=_('uri for vsm')),
|
||||
cfg.StrOpt('datacenter_moid',
|
||||
help=_('Optional parameter identifying the ID of datacenter '
|
||||
'to deploy NSX Edges')),
|
||||
cfg.StrOpt('deployment_container_id',
|
||||
help=_('Optional parameter identifying the ID of datastore to '
|
||||
'deploy NSX Edges')),
|
||||
cfg.StrOpt('resource_pool_id',
|
||||
help=_('Optional parameter identifying the ID of resource to '
|
||||
'deploy NSX Edges')),
|
||||
cfg.StrOpt('datastore_id',
|
||||
help=_('Optional parameter identifying the ID of datastore to '
|
||||
'deploy NSX Edges')),
|
||||
cfg.StrOpt('external_network',
|
||||
help=_('Network ID for physical network connectivity')),
|
||||
cfg.IntOpt('task_status_check_interval',
|
||||
default=DEFAULT_STATUS_CHECK_INTERVAL,
|
||||
help=_("Task status check interval"))
|
||||
]
|
||||
|
||||
# Register the configuration options
|
||||
cfg.CONF.register_opts(connection_opts)
|
||||
cfg.CONF.register_opts(cluster_opts)
|
||||
cfg.CONF.register_opts(vcns_opts, group="vcns")
|
||||
cfg.CONF.register_opts(base_opts, group="NSX")
|
||||
cfg.CONF.register_opts(sync_opts, group="NSX_SYNC")
|
||||
|
||||
|
||||
def validate_config_options():
|
||||
if cfg.CONF.NSX.replication_mode not in (ReplicationModes.SERVICE,
|
||||
ReplicationModes.SOURCE):
|
||||
error = (_("Invalid replication_mode: %s") %
|
||||
cfg.CONF.NSX.replication_mode)
|
||||
raise nsx_exc.NsxPluginException(err_msg=error)
|
@ -1,107 +0,0 @@
|
||||
# Copyright 2012 VMware, Inc
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.common import exceptions as n_exc
|
||||
|
||||
|
||||
class NsxPluginException(n_exc.NeutronException):
|
||||
message = _("An unexpected error occurred in the NSX Plugin: %(err_msg)s")
|
||||
|
||||
|
||||
class InvalidVersion(NsxPluginException):
    """Raised when the backend version cannot fulfill a request."""
    message = _("Unable to fulfill request with version %(version)s.")
|
||||
|
||||
|
||||
class InvalidConnection(NsxPluginException):
    """Raised when NSX connection parameters are malformed."""
    message = _("Invalid NSX connection parameters: %(conn_params)s")
|
||||
|
||||
|
||||
class InvalidClusterConfiguration(NsxPluginException):
    """Raised when NSX cluster configuration values fail validation."""
    message = _("Invalid cluster values: %(invalid_attrs)s. Please ensure "
                "that these values are specified in the [DEFAULT] "
                "section of the NSX plugin ini file.")
|
||||
|
||||
|
||||
class InvalidNovaZone(NsxPluginException):
    """Raised when no cluster config entry exists for a nova zone."""
    message = _("Unable to find cluster config entry "
                "for nova zone: %(nova_zone)s")
|
||||
|
||||
|
||||
class NoMorePortsException(NsxPluginException):
    """Raised when a network has reached its maximum number of ports."""
    message = _("Unable to create port on network %(network)s. "
                "Maximum number of ports reached")
|
||||
|
||||
|
||||
class NatRuleMismatch(NsxPluginException):
    """Raised when the retrieved NAT rule count is out of bounds."""
    message = _("While retrieving NAT rules, %(actual_rules)s were found "
                "whereas rules in the (%(min_rules)s,%(max_rules)s) interval "
                "were expected")
|
||||
|
||||
|
||||
class InvalidAttachmentType(NsxPluginException):
    """Raised when an unsupported NSX attachment type is requested."""
    message = _("Invalid NSX attachment type '%(attachment_type)s'")
|
||||
|
||||
|
||||
class MaintenanceInProgress(NsxPluginException):
    """Raised when the backend is in maintenance mode (read-only)."""
    message = _("The networking backend is currently in maintenance mode and "
                "therefore unable to accept requests which modify its state. "
                "Please try later.")
|
||||
|
||||
|
||||
class L2GatewayAlreadyInUse(n_exc.Conflict):
    """Raised when a gateway service is already bound elsewhere."""
    message = _("Gateway Service %(gateway)s is already in use")
|
||||
|
||||
|
||||
class InvalidSecurityCertificate(NsxPluginException):
    """Raised when a gateway device certificate lacks PEM delimiters."""
    message = _("An invalid security certificate was specified for the "
                "gateway device. Certificates must be enclosed between "
                "'-----BEGIN CERTIFICATE-----' and "
                "'-----END CERTIFICATE-----'")
|
||||
|
||||
|
||||
class ServiceOverQuota(n_exc.Conflict):
    """Raised when a Vcns resource quota is exceeded."""
    message = _("Quota exceeded for Vcns resource: %(overs)s: %(err_msg)s")
|
||||
|
||||
|
||||
class ServiceClusterUnavailable(NsxPluginException):
    """Raised when the NSX service cluster cannot be reached."""
    message = _("Service cluster: '%(cluster_id)s' is unavailable. Please, "
                "check NSX setup and/or configuration")
|
||||
|
||||
|
||||
class PortConfigurationError(NsxPluginException):
    """Raised when a port cannot be wired between an LSN and a network."""
    message = _("An error occurred while connecting LSN %(lsn_id)s "
                "and network %(net_id)s via port %(port_id)s")

    def __init__(self, **kwargs):
        super(PortConfigurationError, self).__init__(**kwargs)
        # Keep the offending port id accessible so callers can clean up
        self.port_id = kwargs.get('port_id')
|
||||
|
||||
|
||||
class LsnNotFound(n_exc.NotFound):
    """Raised when no LSN exists for the given entity."""
    message = _('Unable to find LSN for %(entity)s %(entity_id)s')
|
||||
|
||||
|
||||
class LsnPortNotFound(n_exc.NotFound):
    """Raised when an LSN has no port for the given entity."""
    message = (_('Unable to find port for LSN %(lsn_id)s '
                 'and %(entity)s %(entity_id)s'))
|
||||
|
||||
|
||||
class LsnMigrationConflict(n_exc.Conflict):
    """Raised when a network cannot be migrated to an LSN."""
    message = _("Unable to migrate network '%(net_id)s' to LSN: %(reason)s")
|
||||
|
||||
|
||||
class LsnConfigurationConflict(NsxPluginException):
    """Raised on a configuration conflict for a Logical Service Node."""
    message = _("Configuration conflict on Logical Service Node %(lsn_id)s")
|
@ -1,318 +0,0 @@
|
||||
# Copyright 2013 VMware Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.api.v2 import attributes as attr
|
||||
from neutron.common import exceptions as n_exc
|
||||
from neutron.extensions import multiprovidernet as mpnet
|
||||
from neutron.extensions import providernet as pnet
|
||||
from neutron.i18n import _LW
|
||||
from neutron.openstack.common import log
|
||||
from neutron.plugins.vmware.api_client import client
|
||||
from neutron.plugins.vmware.api_client import exception as api_exc
|
||||
from neutron.plugins.vmware.common import utils as vmw_utils
|
||||
from neutron.plugins.vmware.dbexts import db as nsx_db
|
||||
from neutron.plugins.vmware.dbexts import networkgw_db
|
||||
from neutron.plugins.vmware import nsx_cluster
|
||||
from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
|
||||
from neutron.plugins.vmware.nsxlib import router as routerlib
|
||||
from neutron.plugins.vmware.nsxlib import secgroup as secgrouplib
|
||||
from neutron.plugins.vmware.nsxlib import switch as switchlib
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
def fetch_nsx_switches(session, cluster, neutron_net_id):
    """Retrieve logical switches for a neutron network.

    This function is optimized for fetching all the lswitches always
    with a single NSX query.
    If there is more than 1 logical switch (chained switches use case)
    NSX lswitches are queried by 'quantum_net_id' tag. Otherwise the NSX
    lswitch is directly retrieved by id (more efficient).

    Returns a (possibly empty) list of lswitch dicts.
    """
    nsx_switch_ids = get_nsx_switch_ids(session, cluster, neutron_net_id)
    if not nsx_switch_ids:
        # get_nsx_switch_ids yields no ids when neither the DB nor the
        # backend knows the network; previously this fell through to
        # len(None) / nsx_switch_ids[0] and raised TypeError/IndexError.
        return []
    if len(nsx_switch_ids) > 1:
        lswitches = switchlib.get_lswitches(cluster, neutron_net_id)
    else:
        lswitches = [switchlib.get_lswitch_by_id(
            cluster, nsx_switch_ids[0])]
    return lswitches
|
||||
|
||||
|
||||
def get_nsx_switch_ids(session, cluster, neutron_network_id):
    """Return the NSX switch ids for a given neutron network.

    First look up mappings in the Neutron database. If no mapping is
    found, query the NSX backend and persist the mappings.

    Returns an empty list when no switch exists for the network.
    (The previous implementation returned None here; [] is equally
    falsy for callers but safe to take len() of or iterate.)
    """
    nsx_switch_ids = nsx_db.get_nsx_switch_ids(
        session, neutron_network_id)
    if not nsx_switch_ids:
        # Find logical switches from backend.
        # This is a rather expensive query, but it won't be executed
        # more than once for each network in Neutron's lifetime
        nsx_switches = switchlib.get_lswitches(cluster, neutron_network_id)
        if not nsx_switches:
            LOG.warn(_LW("Unable to find NSX switches for Neutron network %s"),
                     neutron_network_id)
            return []
        nsx_switch_ids = []
        with session.begin(subtransactions=True):
            for nsx_switch in nsx_switches:
                nsx_switch_id = nsx_switch['uuid']
                nsx_switch_ids.append(nsx_switch_id)
                # Cache the mapping in the DB so the backend query is
                # not repeated for this network
                nsx_db.add_neutron_nsx_network_mapping(
                    session,
                    neutron_network_id,
                    nsx_switch_id)
    return nsx_switch_ids
|
||||
|
||||
|
||||
def get_nsx_switch_and_port_id(session, cluster, neutron_port_id):
    """Return the NSX switch and port uuids for a given neutron port.

    First, look up the Neutron database. If not found, execute
    a query on NSX platform as the mapping might be missing because
    the port was created before upgrading to grizzly.

    This routine also retrieves the identifier of the logical switch in
    the backend where the port is plugged. Prior to Icehouse this
    information was not available in the Neutron Database. For dealing
    with pre-existing records, this routine will query the backend
    for retrieving the correct switch identifier.

    As of Icehouse release it is not indeed anymore possible to assume
    the backend logical switch identifier is equal to the neutron
    network identifier.

    Returns (None, None) when the port cannot be found on the backend.
    """
    nsx_switch_id, nsx_port_id = nsx_db.get_nsx_switch_and_port_id(
        session, neutron_port_id)
    if not nsx_switch_id:
        # Find logical switch for port from backend
        # This is a rather expensive query, but it won't be executed
        # more than once for each port in Neutron's lifetime
        nsx_ports = switchlib.query_lswitch_lports(
            cluster, '*', relations='LogicalSwitchConfig',
            filters={'tag': neutron_port_id,
                     'tag_scope': 'q_port_id'})
        # Only one result expected
        # NOTE(salv-orlando): Not handling the case where more than one
        # port is found with the same neutron port tag
        if not nsx_ports:
            LOG.warn(_LW("Unable to find NSX port for Neutron port %s"),
                     neutron_port_id)
            # This method is supposed to return a tuple
            return None, None
        nsx_port = nsx_ports[0]
        # The switch uuid comes from the LogicalSwitchConfig relation
        # requested above
        nsx_switch_id = (nsx_port['_relations']
                         ['LogicalSwitchConfig']['uuid'])
        if nsx_port_id:
            # Mapping already exists. Delete before recreating
            nsx_db.delete_neutron_nsx_port_mapping(
                session, neutron_port_id)
        else:
            nsx_port_id = nsx_port['uuid']
        # (re)Create DB mapping
        nsx_db.add_neutron_nsx_port_mapping(
            session, neutron_port_id,
            nsx_switch_id, nsx_port_id)
    return nsx_switch_id, nsx_port_id
|
||||
|
||||
|
||||
def get_nsx_security_group_id(session, cluster, neutron_id):
    """Return the NSX sec profile uuid for a given neutron sec group.

    First, look up the Neutron database. If not found, execute
    a query on NSX platform as the mapping might be missing.
    NOTE: Security groups are called 'security profiles' on the NSX backend.
    """
    nsx_id = nsx_db.get_nsx_security_group_id(session, neutron_id)
    if nsx_id:
        return nsx_id
    # Cache miss: query the backend by tag. Expensive, but executed at
    # most once per security group since the mapping is stored below.
    profiles = secgrouplib.query_security_profiles(
        cluster, '*',
        filters={'tag': neutron_id,
                 'tag_scope': 'q_sec_group_id'})
    # Only one result expected
    # NOTE(salv-orlando): Not handling the case where more than one
    # security profile is found with the same neutron port tag
    if not profiles:
        LOG.warn(_LW("Unable to find NSX security profile for Neutron "
                     "security group %s"), neutron_id)
        return None
    if len(profiles) > 1:
        LOG.warn(_LW("Multiple NSX security profiles found for Neutron "
                     "security group %s"), neutron_id)
    nsx_id = profiles[0]['uuid']
    with session.begin(subtransactions=True):
        # Store the mapping for subsequent lookups
        nsx_db.add_neutron_nsx_security_group_mapping(
            session, neutron_id, nsx_id)
    return nsx_id
|
||||
|
||||
|
||||
def get_nsx_router_id(session, cluster, neutron_router_id):
    """Return the NSX router uuid for a given neutron router.

    First, look up the Neutron database. If not found, execute
    a query on NSX platform as the mapping might be missing.
    """
    nsx_router_id = nsx_db.get_nsx_router_id(
        session, neutron_router_id)
    if nsx_router_id:
        return nsx_router_id
    # Cache miss: query the backend by tag. Expensive, but executed at
    # most once per router since the mapping is stored below.
    nsx_routers = routerlib.query_lrouters(
        cluster, '*',
        filters={'tag': neutron_router_id,
                 'tag_scope': 'q_router_id'})
    # Only one result expected
    # NOTE(salv-orlando): Not handling the case where more than one
    # router is found with the same neutron router tag
    if not nsx_routers:
        LOG.warn(_LW("Unable to find NSX router for Neutron router %s"),
                 neutron_router_id)
        return None
    nsx_router_id = nsx_routers[0]['uuid']
    with session.begin(subtransactions=True):
        # Store the mapping for subsequent lookups
        nsx_db.add_neutron_nsx_router_mapping(
            session,
            neutron_router_id,
            nsx_router_id)
    return nsx_router_id
|
||||
|
||||
|
||||
def create_nsx_cluster(cluster_opts, concurrent_connections, gen_timeout):
    """Instantiate an NSXCluster and attach a configured NSX API client.

    Controllers are configured as 'host:port' strings; each becomes a
    (host, port, True) provider tuple for the API client.
    """
    cluster = nsx_cluster.NSXCluster(**cluster_opts)

    def _as_provider(host, port):
        # NOTE(review): the trailing True mirrors the original tuple
        # layout (presumably "use HTTPS") - confirm against NsxApiClient.
        return (host, int(port), True)

    providers = [_as_provider(*controller.split(':'))
                 for controller in cluster.nsx_controllers]
    cluster.api_client = client.NsxApiClient(
        providers, cluster.nsx_user, cluster.nsx_password,
        http_timeout=cluster.http_timeout,
        retries=cluster.retries,
        redirects=cluster.redirects,
        concurrent_connections=concurrent_connections,
        gen_timeout=gen_timeout)
    return cluster
|
||||
|
||||
|
||||
def get_nsx_device_status(cluster, nsx_uuid):
    """Map a single gateway device's backend state to a networkgw status.

    NSX API failures map to STATUS_UNKNOWN; a missing device maps to
    networkgw_db.ERROR.
    """
    try:
        device_up = l2gwlib.get_gateway_device_status(
            cluster, nsx_uuid)
    except api_exc.NsxApiException:
        return networkgw_db.STATUS_UNKNOWN
    except n_exc.NotFound:
        # NOTE(review): this branch returns networkgw_db.ERROR rather
        # than a STATUS_* constant like the others - confirm intended.
        return networkgw_db.ERROR
    if device_up:
        return networkgw_db.STATUS_ACTIVE
    return networkgw_db.STATUS_DOWN
|
||||
|
||||
|
||||
def get_nsx_device_statuses(cluster, tenant_id):
    """Return a {device_id: status} dict for a tenant's gateway devices.

    Returns None when the NSX API call fails; the failure is logged
    rather than propagated.
    """
    try:
        status_dict = l2gwlib.get_gateway_devices_status(
            cluster, tenant_id)
    except api_exc.NsxApiException:
        # Do not make a NSX API exception fatal
        if tenant_id:
            LOG.warn(_LW("Unable to retrieve operational status for gateway "
                         "devices belonging to tenant: %s"), tenant_id)
        else:
            LOG.warn(_LW("Unable to retrieve operational status for "
                         "gateway devices"))
        return None
    return dict((nsx_device_id,
                 networkgw_db.STATUS_ACTIVE if connected
                 else networkgw_db.STATUS_DOWN)
                for (nsx_device_id, connected) in status_dict.iteritems())
|
||||
|
||||
|
||||
def _convert_bindings_to_nsx_transport_zones(bindings):
    """Translate DB network bindings into NSX transport zone entries."""
    zones = []
    for binding in bindings:
        entry = {}
        if binding.binding_type in (vmw_utils.NetworkTypes.FLAT,
                                    vmw_utils.NetworkTypes.VLAN):
            # flat/vlan bindings are realized as NSX bridge transports
            entry['transport_type'] = vmw_utils.NetworkTypes.BRIDGE
            entry['binding_config'] = {}
            if binding.vlan_id:
                entry['binding_config'] = {
                    'vlan_translation': [{'transport': binding.vlan_id}]}
        else:
            entry['transport_type'] = binding.binding_type
        entry['zone_uuid'] = binding.phy_uuid
        zones.append(entry)
    return zones
|
||||
|
||||
|
||||
def _convert_segments_to_nsx_transport_zones(segments, default_tz_uuid):
    """Translate multiprovider segments into NSX transport zone entries."""
    zones = []
    for segment in segments:
        # Normalize unset provider attributes to None (mutates segment,
        # matching previous behavior)
        for key in (pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
                    pnet.SEGMENTATION_ID):
            if segment.get(key) == attr.ATTR_NOT_SPECIFIED:
                segment[key] = None

        entry = {}
        net_type = segment.get(pnet.NETWORK_TYPE)
        if net_type in (vmw_utils.NetworkTypes.FLAT,
                        vmw_utils.NetworkTypes.VLAN):
            # flat/vlan segments are realized as NSX bridge transports
            entry['transport_type'] = vmw_utils.NetworkTypes.BRIDGE
            entry['binding_config'] = {}
            vlan_id = segment.get(pnet.SEGMENTATION_ID)
            if vlan_id:
                entry['binding_config'] = {
                    'vlan_translation': [{'transport': vlan_id}]}
        else:
            entry['transport_type'] = net_type
        entry['zone_uuid'] = (
            segment[pnet.PHYSICAL_NETWORK] or default_tz_uuid)
        zones.append(entry)
    return zones
|
||||
|
||||
|
||||
def convert_to_nsx_transport_zones(
    default_tz_uuid, network=None, bindings=None,
    default_transport_type=None):
    """Build the NSX transport zone list for a network.

    Sources, checked in order:
    1. network with no multiprovider segments -> single default zone;
    2. provider bindings from the DB;
    3. the network's multiprovider segments.
    """

    # Convert fields from provider request to nsx format
    if (network and not attr.is_attr_set(
        network.get(mpnet.SEGMENTS))):
        return [{"zone_uuid": default_tz_uuid,
                 "transport_type": default_transport_type}]

    # Convert fields from db to nsx format
    if bindings:
        return _convert_bindings_to_nsx_transport_zones(bindings)

    # If we end up here we need to convert multiprovider segments into nsx
    # transport zone configurations
    # NOTE(review): network may be None here if bindings was also empty;
    # that would raise AttributeError - presumably callers always pass
    # one of the two. Confirm.
    return _convert_segments_to_nsx_transport_zones(
        network.get(mpnet.SEGMENTS), default_tz_uuid)
|
@ -1,134 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.openstack.common import log
|
||||
from neutron.plugins.vmware.common import nsx_utils
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
# Protocol number look up for supported protocols
|
||||
protocol_num_look_up = {'tcp': 6, 'icmp': 1, 'udp': 17}
|
||||
|
||||
|
||||
def _convert_to_nsx_rule(session, cluster, rule, with_id=False):
    """Converts a Neutron security group rule to the NSX format.

    This routine also replaces Neutron IDs with NSX UUIDs.
    """
    nsx_rule = {}
    params = ['remote_ip_prefix', 'protocol',
              'remote_group_id', 'port_range_min',
              'port_range_max', 'ethertype']
    if with_id:
        params.append('id')

    for param in params:
        value = rule.get(param)
        if param not in rule:
            # NOTE(review): value is necessarily None here, so this
            # stores None for every expected-but-absent key; it looks
            # like the intent may have been to skip such keys - confirm.
            nsx_rule[param] = value
        elif not value:
            # present but falsy (None, 0, ''): omit from the NSX rule
            pass
        elif param == 'remote_ip_prefix':
            nsx_rule['ip_prefix'] = rule['remote_ip_prefix']
        elif param == 'remote_group_id':
            # Replace the Neutron security group id with the
            # corresponding NSX security profile uuid
            nsx_rule['profile_uuid'] = nsx_utils.get_nsx_security_group_id(
                session, cluster, rule['remote_group_id'])

        elif param == 'protocol':
            try:
                nsx_rule['protocol'] = int(rule['protocol'])
            except (ValueError, TypeError):
                # named protocol ('tcp'/'udp'/'icmp') -> protocol number
                nsx_rule['protocol'] = (
                    protocol_num_look_up[rule['protocol']])
        else:
            nsx_rule[param] = value
    return nsx_rule
|
||||
|
||||
|
||||
def _convert_to_nsx_rules(session, cluster, rules, with_id=False):
    """Converts a list of Neutron security group rules to the NSX format."""
    directions = ('logical_port_ingress_rules',
                  'logical_port_egress_rules')
    return dict(
        (direction,
         [_convert_to_nsx_rule(session, cluster, rule, with_id)
          for rule in rules[direction]])
        for direction in directions)
|
||||
|
||||
|
||||
def get_security_group_rules_nsx_format(session, cluster,
                                        security_group_rules, with_id=False):
    """Convert neutron security group rules into NSX format.

    This routine splits Neutron security group rules into two lists, one
    for ingress rules and the other for egress rules.
    """

    def fields(rule):
        # Restrict each rule dict to the attributes NSX understands
        _fields = ['remote_ip_prefix', 'remote_group_id', 'protocol',
                   'port_range_min', 'port_range_max', 'protocol', 'ethertype']
        if with_id:
            _fields.append('id')
        return dict((k, v) for k, v in rule.iteritems() if k in _fields)

    ingress_rules = []
    egress_rules = []
    for rule in security_group_rules:
        # NOTE(review): 'souce_group_id' looks like a typo (of
        # 'source_group_id'?) so this branch appears to be dead code.
        # Do not "fix" it blindly: _convert_to_nsx_rule already maps
        # remote_group_id to an NSX uuid, and enabling this branch
        # would convert twice. Confirm before changing.
        if rule.get('souce_group_id'):
            rule['remote_group_id'] = nsx_utils.get_nsx_security_group_id(
                session, cluster, rule['remote_group_id'])

        if rule['direction'] == 'ingress':
            ingress_rules.append(fields(rule))
        elif rule['direction'] == 'egress':
            egress_rules.append(fields(rule))
    # NOTE(review): the swap below (neutron ingress rules stored under
    # 'logical_port_ingress_rules' as egress_rules and vice versa) is
    # presumably because NSX classifies traffic from the port's point
    # of view - confirm.
    rules = {'logical_port_ingress_rules': egress_rules,
             'logical_port_egress_rules': ingress_rules}
    return _convert_to_nsx_rules(session, cluster, rules, with_id)
|
||||
|
||||
|
||||
def merge_security_group_rules_with_current(session, cluster,
                                            new_rules, current_rules):
    """Merge newly created rules into the current NSX-format rule set.

    Neutron 'ingress' rules are appended to the NSX egress bucket and
    vice versa, mirroring get_security_group_rules_nsx_format.
    """
    merged_rules = get_security_group_rules_nsx_format(
        session, cluster, current_rules)
    bucket_for = {'ingress': 'logical_port_egress_rules',
                  'egress': 'logical_port_ingress_rules'}
    for new_rule in new_rules:
        rule = new_rule['security_group_rule']
        bucket = bucket_for.get(rule['direction'])
        if bucket:
            merged_rules[bucket].append(
                _convert_to_nsx_rule(session, cluster, rule))
    return merged_rules
|
||||
|
||||
|
||||
def remove_security_group_with_id_and_id_field(rules, rule_id):
    """Remove the rule matching rule_id and strip 'id' from the rest.

    `rules` maps each NSX direction key to a list of rule dicts, all
    carrying an 'id' key.  The entry whose id equals `rule_id` is
    removed from its list; every other entry has its 'id' key deleted,
    since that field should not be passed to NSX.  Mutates `rules`
    in place.
    """
    for direction_rules in rules.values():
        matched = None
        for entry in direction_rules:
            if entry['id'] == rule_id:
                matched = entry
            else:
                # strip the Neutron-only 'id' key before sending to NSX
                del entry['id']
        if matched:
            direction_rules.remove(matched)
|
@ -1,676 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import random
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import timeutils
|
||||
|
||||
from neutron.common import constants
|
||||
from neutron.common import exceptions
|
||||
from neutron import context
|
||||
from neutron.db import external_net_db
|
||||
from neutron.db import l3_db
|
||||
from neutron.db import models_v2
|
||||
from neutron.extensions import l3
|
||||
from neutron.i18n import _LE, _LI, _LW
|
||||
from neutron.openstack.common import log
|
||||
from neutron.openstack.common import loopingcall
|
||||
from neutron.plugins.vmware.api_client import exception as api_exc
|
||||
from neutron.plugins.vmware.common import exceptions as nsx_exc
|
||||
from neutron.plugins.vmware.common import nsx_utils
|
||||
from neutron.plugins.vmware import nsxlib
|
||||
from neutron.plugins.vmware.nsxlib import router as routerlib
|
||||
from neutron.plugins.vmware.nsxlib import switch as switchlib
|
||||
|
||||
# Maximum page size for a single request
|
||||
# NOTE(salv-orlando): This might become a version-dependent map should the
|
||||
# limit be raised in future versions
|
||||
MAX_PAGE_SIZE = 5000
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class NsxCache(object):
    """A simple Cache for NSX resources.

    Associates resource id with resource hash to rapidly identify
    updated resources.
    Each entry in the cache also stores the following information:
    - changed: the resource in the cache has been altered following
      an update or a delete
    - hit: the resource has been visited during an update (and possibly
      left unchanged)
    - data: current resource data
    - data_bk: backup of resource data prior to its removal
    """

    def __init__(self):
        # Maps a uuid to the dict containing it
        self._uuid_dict_mappings = {}
        # Dicts for NSX cached resources
        self._lswitches = {}
        self._lswitchports = {}
        self._lrouters = {}

    def __getitem__(self, key):
        """Return the cache entry for a resource uuid of any type."""
        # uuids are unique across the various types of resources
        # TODO(salv-orlando): Avoid lookups over all dictionaries
        # when retrieving items
        # Fetch lswitches, lports, or lrouters
        resources = self._uuid_dict_mappings[key]
        return resources[key]

    def _clear_changed_flag_and_remove_from_cache(self, resources):
        """Reset 'changed' flags; evict entries whose data is gone."""
        # Clear the 'changed' attribute for all items
        for uuid, item in resources.items():
            if item.pop('changed', None) and not item.get('data'):
                # The item is not anymore in NSX, so delete it
                del resources[uuid]
                del self._uuid_dict_mappings[uuid]
                LOG.debug("Removed item %s from NSX object cache", uuid)

    def _update_resources(self, resources, new_resources, clear_changed=True):
        """Fold new backend data into a resource dict.

        Marks entries as 'hit' when seen and 'changed' when their JSON
        hash differs; new uuids are added and indexed for __getitem__.
        """
        if clear_changed:
            self._clear_changed_flag_and_remove_from_cache(resources)

        def do_hash(item):
            # Hash of the JSON serialization detects content changes
            return hash(jsonutils.dumps(item))

        # Parse new data and identify new, deleted, and updated resources
        for item in new_resources:
            item_id = item['uuid']
            if resources.get(item_id):
                new_hash = do_hash(item)
                if new_hash != resources[item_id]['hash']:
                    resources[item_id]['hash'] = new_hash
                    resources[item_id]['changed'] = True
                    resources[item_id]['data_bk'] = (
                        resources[item_id]['data'])
                    resources[item_id]['data'] = item
                # Mark the item as hit in any case
                resources[item_id]['hit'] = True
                LOG.debug("Updating item %s in NSX object cache", item_id)
            else:
                resources[item_id] = {'hash': do_hash(item)}
                resources[item_id]['hit'] = True
                resources[item_id]['changed'] = True
                resources[item_id]['data'] = item
                # add a uuid to dict mapping for easy retrieval
                # with __getitem__
                self._uuid_dict_mappings[item_id] = resources
                LOG.debug("Added item %s to NSX object cache", item_id)

    def _delete_resources(self, resources):
        """Flag entries not visited in the last update as deleted."""
        # Mark for removal all the elements which have not been visited.
        # And clear the 'hit' attribute.
        for to_delete in [k for (k, v) in resources.iteritems()
                          if not v.pop('hit', False)]:
            resources[to_delete]['changed'] = True
            resources[to_delete]['data_bk'] = (
                resources[to_delete].pop('data', None))

    def _get_resource_ids(self, resources, changed_only):
        """Return uuids in a resource dict, optionally only changed ones."""
        if changed_only:
            return [k for (k, v) in resources.iteritems()
                    if v.get('changed')]
        return resources.keys()

    def get_lswitches(self, changed_only=False):
        return self._get_resource_ids(self._lswitches, changed_only)

    def get_lrouters(self, changed_only=False):
        return self._get_resource_ids(self._lrouters, changed_only)

    def get_lswitchports(self, changed_only=False):
        return self._get_resource_ids(self._lswitchports, changed_only)

    def update_lswitch(self, lswitch):
        self._update_resources(self._lswitches, [lswitch], clear_changed=False)

    def update_lrouter(self, lrouter):
        self._update_resources(self._lrouters, [lrouter], clear_changed=False)

    def update_lswitchport(self, lswitchport):
        self._update_resources(self._lswitchports, [lswitchport],
                               clear_changed=False)

    def process_updates(self, lswitches=None,
                        lrouters=None, lswitchports=None):
        """Ingest fresh backend data; return changed uuids per type."""
        self._update_resources(self._lswitches, lswitches)
        self._update_resources(self._lrouters, lrouters)
        self._update_resources(self._lswitchports, lswitchports)
        return (self._get_resource_ids(self._lswitches, changed_only=True),
                self._get_resource_ids(self._lrouters, changed_only=True),
                self._get_resource_ids(self._lswitchports, changed_only=True))

    def process_deletes(self):
        """Flag unvisited entries as deleted; return changed uuids per type."""
        self._delete_resources(self._lswitches)
        self._delete_resources(self._lrouters)
        self._delete_resources(self._lswitchports)
        return (self._get_resource_ids(self._lswitches, changed_only=True),
                self._get_resource_ids(self._lrouters, changed_only=True),
                self._get_resource_ids(self._lswitchports, changed_only=True))
|
||||
|
||||
|
||||
class SyncParameters(object):
    """State carried across iterations of the synchronization task.

    chunk_size: actual chunk size
    extra_chunk_size: additional data to fetch because of chunk size
        adjustment
    current_chunk: counter of the current data chunk being synchronized
    ls_cursor/lr_cursor/lp_cursor: page cursors for switches, routers
        and ports; 'start' means the cursor is unset (fetch first page)
    init_sync_performed: True once the initial synchronization concluded
    """

    def __init__(self, min_chunk_size):
        self.ls_cursor = self.lr_cursor = self.lp_cursor = 'start'
        self.chunk_size = min_chunk_size
        self.extra_chunk_size = 0
        self.current_chunk = 0
        self.total_size = 0
        self.init_sync_performed = False
|
||||
|
||||
|
||||
def _start_loopingcall(min_chunk_size, state_sync_interval, func):
    """Start and return a looping call driving the synchronization task.

    Returns None without starting anything when state_sync_interval is
    zero/falsy (sync disabled).
    """
    if not state_sync_interval:
        return None
    synchronizer = loopingcall.DynamicLoopingCall(
        func, sp=SyncParameters(min_chunk_size))
    synchronizer.start(
        periodic_interval_max=state_sync_interval)
    return synchronizer
|
||||
|
||||
|
||||
class NsxSynchronizer(object):
    """Periodically reconcile Neutron resource status with the NSX backend.

    Fetches logical switches, routers, and switch ports from NSX in
    chunks via a dynamic looping call, diffs them against a local cache
    (NsxCache), and updates the ``status`` column of the corresponding
    Neutron networks, routers, and ports in the database.
    """

    # URIs for paged queries against the NSX API; each request also pulls
    # the status relation so operational state can be read from the result.
    LS_URI = nsxlib._build_uri_path(
        switchlib.LSWITCH_RESOURCE, fields='uuid,tags,fabric_status',
        relations='LogicalSwitchStatus')
    LR_URI = nsxlib._build_uri_path(
        routerlib.LROUTER_RESOURCE, fields='uuid,tags,fabric_status',
        relations='LogicalRouterStatus')
    LP_URI = nsxlib._build_uri_path(
        switchlib.LSWITCHPORT_RESOURCE,
        parent_resource_id='*',
        fields='uuid,tags,fabric_status_up',
        relations='LogicalPortStatus')

    def __init__(self, plugin, cluster, state_sync_interval,
                 req_delay, min_chunk_size, max_rand_delay=0):
        """Validate sync parameters and start the periodic looping call.

        :param plugin: the Neutron plugin whose DB objects are updated
        :param cluster: NSX cluster handle used for backend requests
        :param state_sync_interval: seconds for one full sync pass
        :param req_delay: minimum delay between backend requests
        :param min_chunk_size: initial number of objects fetched per chunk
        :param max_rand_delay: upper bound for the random jitter added at
            the end of each full pass
        :raises nsx_exc.NsxPluginException: if req_delay exceeds
            state_sync_interval
        """
        random.seed()
        self._nsx_cache = NsxCache()
        # Store parameters as instance members
        # NOTE(salv-orlando): apologies if it looks java-ish
        self._plugin = plugin
        self._cluster = cluster
        self._req_delay = req_delay
        self._sync_interval = state_sync_interval
        self._max_rand_delay = max_rand_delay
        # Validate parameters
        if self._sync_interval < self._req_delay:
            err_msg = (_("Minimum request delay:%(req_delay)s must not "
                         "exceed synchronization interval:%(sync_interval)s") %
                       {'req_delay': self._req_delay,
                        'sync_interval': self._sync_interval})
            LOG.error(err_msg)
            raise nsx_exc.NsxPluginException(err_msg=err_msg)
        # Backoff time in case of failures while fetching sync data
        self._sync_backoff = 1
        # Store the looping call in an instance variable to allow unit tests
        # for controlling its lifecycle
        self._sync_looping_call = _start_loopingcall(
            min_chunk_size, state_sync_interval, self._synchronize_state)

    def _get_tag_dict(self, tags):
        """Convert a list of {'scope': ..., 'tag': ...} dicts to a mapping.

        Later entries with a duplicate scope silently win.
        """
        return dict((tag.get('scope'), tag['tag']) for tag in tags)

    def synchronize_network(self, context, neutron_network_data,
                            lswitches=None):
        """Synchronize a Neutron network with its NSX counterpart.

        This routine synchronizes a set of switches when a Neutron
        network is mapped to multiple lswitches.
        """
        if not lswitches:
            # Try to get logical switches from nsx
            try:
                lswitches = nsx_utils.fetch_nsx_switches(
                    context.session, self._cluster,
                    neutron_network_data['id'])
            except exceptions.NetworkNotFound:
                # TODO(salv-orlando): We should be catching
                # api_exc.ResourceNotFound here
                # The logical switch was not found
                LOG.warning(_LW("Logical switch for neutron network %s not "
                                "found on NSX."), neutron_network_data['id'])
                lswitches = []
        else:
            # Caller supplied the switches; refresh the cache with them.
            for lswitch in lswitches:
                self._nsx_cache.update_lswitch(lswitch)
        # By default assume things go wrong
        status = constants.NET_STATUS_ERROR
        # In most cases lswitches will contain a single element
        for ls in lswitches:
            if not ls:
                # Logical switch was deleted
                break
            ls_status = ls['_relations']['LogicalSwitchStatus']
            if not ls_status['fabric_status']:
                status = constants.NET_STATUS_DOWN
                break
        else:
            # No switch was down or missing. Set status to ACTIVE unless
            # there were no switches in the first place!
            if lswitches:
                status = constants.NET_STATUS_ACTIVE
        # Update db object
        if status == neutron_network_data['status']:
            # do nothing
            return

        with context.session.begin(subtransactions=True):
            try:
                network = self._plugin._get_network(context,
                                                    neutron_network_data['id'])
            except exceptions.NetworkNotFound:
                # Network removed from the DB concurrently; nothing to update.
                pass
            else:
                network.status = status
                LOG.debug("Updating status for neutron resource %(q_id)s to:"
                          " %(status)s",
                          {'q_id': neutron_network_data['id'],
                           'status': status})

    def _synchronize_lswitches(self, ctx, ls_uuids, scan_missing=False):
        """Sync the Neutron networks backing the given logical switches.

        When scan_missing is True, all non-external networks are examined
        so networks whose switches disappeared from NSX are caught too.
        """
        if not ls_uuids and not scan_missing:
            return
        neutron_net_ids = set()
        neutron_nsx_mappings = {}
        # TODO(salvatore-orlando): Deal with the case the tag
        # has been tampered with
        for ls_uuid in ls_uuids:
            # If the lswitch has been deleted, get backup copy of data
            lswitch = (self._nsx_cache[ls_uuid].get('data') or
                       self._nsx_cache[ls_uuid].get('data_bk'))
            tags = self._get_tag_dict(lswitch['tags'])
            neutron_id = tags.get('quantum_net_id')
            neutron_net_ids.add(neutron_id)
            # A network may map to several chained switches; accumulate them.
            neutron_nsx_mappings[neutron_id] = (
                neutron_nsx_mappings.get(neutron_id, []) +
                [self._nsx_cache[ls_uuid]])
        # Fetch neutron networks from database
        filters = {'router:external': [False]}
        if not scan_missing:
            filters['id'] = neutron_net_ids

        networks = self._plugin._get_collection(
            ctx, models_v2.Network, self._plugin._make_network_dict,
            filters=filters)

        for network in networks:
            lswitches = neutron_nsx_mappings.get(network['id'], [])
            lswitches = [lsw.get('data') for lsw in lswitches]
            self.synchronize_network(ctx, network, lswitches)

    def synchronize_router(self, context, neutron_router_data,
                           lrouter=None):
        """Synchronize a neutron router with its NSX counterpart."""
        if not lrouter:
            # Try to get router from nsx
            try:
                # This query will return the logical router status too
                nsx_router_id = nsx_utils.get_nsx_router_id(
                    context.session, self._cluster, neutron_router_data['id'])
                if nsx_router_id:
                    lrouter = routerlib.get_lrouter(
                        self._cluster, nsx_router_id)
            except exceptions.NotFound:
                # NOTE(salv-orlando): We should be catching
                # api_exc.ResourceNotFound here
                # The logical router was not found
                LOG.warning(_LW("Logical router for neutron router %s not "
                                "found on NSX."), neutron_router_data['id'])
        if lrouter:
            # Update the cache
            self._nsx_cache.update_lrouter(lrouter)

        # Note(salv-orlando): It might worth adding a check to verify neutron
        # resource tag in nsx entity matches a Neutron id.
        # By default assume things go wrong
        status = constants.NET_STATUS_ERROR
        if lrouter:
            lr_status = (lrouter['_relations']
                         ['LogicalRouterStatus']
                         ['fabric_status'])
            status = (lr_status and
                      constants.NET_STATUS_ACTIVE
                      or constants.NET_STATUS_DOWN)
        # Update db object
        if status == neutron_router_data['status']:
            # do nothing
            return

        with context.session.begin(subtransactions=True):
            try:
                router = self._plugin._get_router(context,
                                                  neutron_router_data['id'])
            except l3.RouterNotFound:
                # Router removed from the DB concurrently; nothing to update.
                pass
            else:
                router.status = status
                LOG.debug("Updating status for neutron resource %(q_id)s to:"
                          " %(status)s",
                          {'q_id': neutron_router_data['id'],
                           'status': status})

    def _synchronize_lrouters(self, ctx, lr_uuids, scan_missing=False):
        """Sync the Neutron routers backing the given logical routers."""
        if not lr_uuids and not scan_missing:
            return
        # TODO(salvatore-orlando): Deal with the case the tag
        # has been tampered with
        neutron_router_mappings = {}
        for lr_uuid in lr_uuids:
            # If the lrouter has been deleted, fall back to its backup copy.
            lrouter = (self._nsx_cache[lr_uuid].get('data') or
                       self._nsx_cache[lr_uuid].get('data_bk'))
            tags = self._get_tag_dict(lrouter['tags'])
            neutron_router_id = tags.get('q_router_id')
            if neutron_router_id:
                neutron_router_mappings[neutron_router_id] = (
                    self._nsx_cache[lr_uuid])
            else:
                LOG.warn(_LW("Unable to find Neutron router id for "
                             "NSX logical router: %s"), lr_uuid)
        # Fetch neutron routers from database
        filters = ({} if scan_missing else
                   {'id': neutron_router_mappings.keys()})
        routers = self._plugin._get_collection(
            ctx, l3_db.Router, self._plugin._make_router_dict,
            filters=filters)
        for router in routers:
            lrouter = neutron_router_mappings.get(router['id'])
            self.synchronize_router(
                ctx, router, lrouter and lrouter.get('data'))

    def synchronize_port(self, context, neutron_port_data,
                         lswitchport=None, ext_networks=None):
        """Synchronize a Neutron port with its NSX counterpart."""
        # Skip synchronization for ports on external networks
        if not ext_networks:
            ext_networks = [net['id'] for net in context.session.query(
                models_v2.Network).join(
                    external_net_db.ExternalNetwork,
                    (models_v2.Network.id ==
                     external_net_db.ExternalNetwork.network_id))]
        if neutron_port_data['network_id'] in ext_networks:
            # Ports on external networks are always reported ACTIVE.
            with context.session.begin(subtransactions=True):
                neutron_port_data['status'] = constants.PORT_STATUS_ACTIVE
                return

        if not lswitchport:
            # Try to get port from nsx
            try:
                ls_uuid, lp_uuid = nsx_utils.get_nsx_switch_and_port_id(
                    context.session, self._cluster, neutron_port_data['id'])
                if lp_uuid:
                    lswitchport = switchlib.get_port(
                        self._cluster, ls_uuid, lp_uuid,
                        relations='LogicalPortStatus')
            except (exceptions.PortNotFoundOnNetwork):
                # NOTE(salv-orlando): We should be catching
                # api_exc.ResourceNotFound here instead
                # of PortNotFoundOnNetwork when the id exists but
                # the logical switch port was not found
                LOG.warning(_LW("Logical switch port for neutron port %s "
                                "not found on NSX."), neutron_port_data['id'])
                lswitchport = None
        else:
            # If lswitchport is not None, update the cache.
            # It could be none if the port was deleted from the backend
            if lswitchport:
                self._nsx_cache.update_lswitchport(lswitchport)
        # Note(salv-orlando): It might worth adding a check to verify neutron
        # resource tag in nsx entity matches Neutron id.
        # By default assume things go wrong
        status = constants.PORT_STATUS_ERROR
        if lswitchport:
            lp_status = (lswitchport['_relations']
                         ['LogicalPortStatus']
                         ['fabric_status_up'])
            status = (lp_status and
                      constants.PORT_STATUS_ACTIVE
                      or constants.PORT_STATUS_DOWN)

        # Update db object
        if status == neutron_port_data['status']:
            # do nothing
            return

        with context.session.begin(subtransactions=True):
            try:
                port = self._plugin._get_port(context,
                                              neutron_port_data['id'])
            except exceptions.PortNotFound:
                # Port removed from the DB concurrently; nothing to update.
                pass
            else:
                port.status = status
                LOG.debug("Updating status for neutron resource %(q_id)s to:"
                          " %(status)s",
                          {'q_id': neutron_port_data['id'],
                           'status': status})

    def _synchronize_lswitchports(self, ctx, lp_uuids, scan_missing=False):
        """Sync the Neutron ports backing the given logical switch ports."""
        if not lp_uuids and not scan_missing:
            return
        # Find Neutron port id by tag - the tag is already
        # loaded in memory, no reason for doing a db query
        # TODO(salvatore-orlando): Deal with the case the tag
        # has been tampered with
        neutron_port_mappings = {}
        for lp_uuid in lp_uuids:
            # If the lport has been deleted, fall back to its backup copy.
            lport = (self._nsx_cache[lp_uuid].get('data') or
                     self._nsx_cache[lp_uuid].get('data_bk'))
            tags = self._get_tag_dict(lport['tags'])
            neutron_port_id = tags.get('q_port_id')
            if neutron_port_id:
                neutron_port_mappings[neutron_port_id] = (
                    self._nsx_cache[lp_uuid])
        # Fetch neutron ports from database
        # At the first sync we need to fetch all ports
        filters = ({} if scan_missing else
                   {'id': neutron_port_mappings.keys()})
        # TODO(salv-orlando): Work out a solution for avoiding
        # this query
        ext_nets = [net['id'] for net in ctx.session.query(
            models_v2.Network).join(
                external_net_db.ExternalNetwork,
                (models_v2.Network.id ==
                 external_net_db.ExternalNetwork.network_id))]
        ports = self._plugin._get_collection(
            ctx, models_v2.Port, self._plugin._make_port_dict,
            filters=filters)
        for port in ports:
            lswitchport = neutron_port_mappings.get(port['id'])
            self.synchronize_port(
                ctx, port, lswitchport and lswitchport.get('data'),
                ext_networks=ext_nets)

    def _get_chunk_size(self, sp):
        """Return a chunk size balancing total data against request budget.

        Scales the current chunk size by the ratio between the number of
        chunks needed and the number of requests allowed per interval.
        """
        # NOTE(salv-orlando): Try to use __future__ for this routine only?
        ratio = ((float(sp.total_size) / float(sp.chunk_size)) /
                 (float(self._sync_interval) / float(self._req_delay)))
        new_size = max(1.0, ratio) * float(sp.chunk_size)
        # Ceiling without math.ceil: the boolean adds 1 when there is a
        # fractional part.
        return int(new_size) + (new_size - int(new_size) > 0)

    def _fetch_data(self, uri, cursor, page_size):
        """Fetch one page of data from the NSX backend.

        :returns: (results, next_cursor, total_size). The sentinel cursor
            'start' means "begin a new scan"; a false cursor means there
            is nothing left to retrieve.
        """
        # If not cursor there is nothing to retrieve
        if cursor:
            if cursor == 'start':
                cursor = None
            # Chunk size tuning might, in some conditions, make it larger
            # than 5,000, which is the maximum page size allowed by the NSX
            # API. In this case the request should be split in multiple
            # requests. This is not ideal, and therefore a log warning will
            # be emitted.
            num_requests = page_size / (MAX_PAGE_SIZE + 1) + 1
            if num_requests > 1:
                LOG.warn(_LW("Requested page size is %(cur_chunk_size)d. "
                             "It might be necessary to do %(num_requests)d "
                             "round-trips to NSX for fetching data. Please "
                             "tune sync parameters to ensure chunk size "
                             "is less than %(max_page_size)d"),
                         {'cur_chunk_size': page_size,
                          'num_requests': num_requests,
                          'max_page_size': MAX_PAGE_SIZE})
            # Only the first request might return the total size,
            # subsequent requests will definetely not
            results, cursor, total_size = nsxlib.get_single_query_page(
                uri, self._cluster, cursor,
                min(page_size, MAX_PAGE_SIZE))
            for _req in range(num_requests - 1):
                # If no cursor is returned break the cycle as there is no
                # actual need to perform multiple requests (all fetched)
                # This happens when the overall size of resources exceeds
                # the maximum page size, but the number for each single
                # resource type is below this threshold
                if not cursor:
                    break
                req_results, cursor = nsxlib.get_single_query_page(
                    uri, self._cluster, cursor,
                    min(page_size, MAX_PAGE_SIZE))[:2]
                results.extend(req_results)
            # reset cursor before returning if we queried just to
            # know the number of entities
            return results, cursor if page_size else 'start', total_size
        return [], cursor, None

    def _fetch_nsx_data_chunk(self, sp):
        """Fetch up to one chunk of switches, routers and ports from NSX.

        Resources are drained in LS, LR, LP order; later resource types
        only consume whatever budget is left in the chunk. On the first
        chunk of a pass the total size and chunk size are recomputed.
        """
        base_chunk_size = sp.chunk_size
        chunk_size = base_chunk_size + sp.extra_chunk_size
        LOG.info(_LI("Fetching up to %s resources "
                     "from NSX backend"), chunk_size)
        fetched = ls_count = lr_count = lp_count = 0
        # The three names initially alias one empty list; this is safe as
        # they are only ever rebound below, never mutated in place.
        lswitches = lrouters = lswitchports = []
        if sp.ls_cursor or sp.ls_cursor == 'start':
            (lswitches, sp.ls_cursor, ls_count) = self._fetch_data(
                self.LS_URI, sp.ls_cursor, chunk_size)
            fetched = len(lswitches)
        # NOTE: due to operator precedence this reads as
        # (fetched < chunk_size and cursor) or cursor == 'start' - a
        # 'start' cursor always triggers a fetch, even on a full chunk.
        if fetched < chunk_size and sp.lr_cursor or sp.lr_cursor == 'start':
            (lrouters, sp.lr_cursor, lr_count) = self._fetch_data(
                self.LR_URI, sp.lr_cursor, max(chunk_size - fetched, 0))
            fetched += len(lrouters)
        if fetched < chunk_size and sp.lp_cursor or sp.lp_cursor == 'start':
            (lswitchports, sp.lp_cursor, lp_count) = self._fetch_data(
                self.LP_URI, sp.lp_cursor, max(chunk_size - fetched, 0))
            fetched += len(lswitchports)
        if sp.current_chunk == 0:
            # No cursors were provided. Then it must be possible to
            # calculate the total amount of data to fetch
            sp.total_size = ls_count + lr_count + lp_count
            LOG.debug("Total data size: %d", sp.total_size)
            sp.chunk_size = self._get_chunk_size(sp)
            # Calculate chunk size adjustment
            sp.extra_chunk_size = sp.chunk_size - base_chunk_size
        LOG.debug("Fetched %(num_lswitches)d logical switches, "
                  "%(num_lswitchports)d logical switch ports,"
                  "%(num_lrouters)d logical routers",
                  {'num_lswitches': len(lswitches),
                   'num_lswitchports': len(lswitchports),
                   'num_lrouters': len(lrouters)})
        return (lswitches, lrouters, lswitchports)

    def _synchronize_state(self, sp):
        """Run one chunk of the synchronization task.

        Invoked by the DynamicLoopingCall; the return value is the number
        of seconds to wait before the next invocation.
        """
        # If the plugin has been destroyed, stop the LoopingCall
        if not self._plugin:
            raise loopingcall.LoopingCallDone()
        start = timeutils.utcnow()
        # Reset page cursor variables if necessary
        if sp.current_chunk == 0:
            sp.ls_cursor = sp.lr_cursor = sp.lp_cursor = 'start'
        LOG.info(_LI("Running state synchronization task. Chunk: %s"),
                 sp.current_chunk)
        # Fetch chunk_size data from NSX
        try:
            (lswitches, lrouters, lswitchports) = (
                self._fetch_nsx_data_chunk(sp))
        except (api_exc.RequestTimeout, api_exc.NsxApiException):
            sleep_interval = self._sync_backoff
            # Cap max back off to 64 seconds
            self._sync_backoff = min(self._sync_backoff * 2, 64)
            LOG.exception(_LE("An error occurred while communicating with "
                              "NSX backend. Will retry synchronization "
                              "in %d seconds"), sleep_interval)
            return sleep_interval
        LOG.debug("Time elapsed querying NSX: %s",
                  timeutils.utcnow() - start)
        if sp.total_size:
            # Ceiling division (Python 2 integer semantics).
            num_chunks = ((sp.total_size / sp.chunk_size) +
                          (sp.total_size % sp.chunk_size != 0))
        else:
            num_chunks = 1
        LOG.debug("Number of chunks: %d", num_chunks)
        # Find objects which have changed on NSX side and need
        # to be synchronized
        LOG.debug("Processing NSX cache for updated objects")
        (ls_uuids, lr_uuids, lp_uuids) = self._nsx_cache.process_updates(
            lswitches, lrouters, lswitchports)
        # Process removed objects only at the last chunk
        scan_missing = (sp.current_chunk == num_chunks - 1 and
                        not sp.init_sync_performed)
        if sp.current_chunk == num_chunks - 1:
            LOG.debug("Processing NSX cache for deleted objects")
            self._nsx_cache.process_deletes()
            ls_uuids = self._nsx_cache.get_lswitches(
                changed_only=not scan_missing)
            lr_uuids = self._nsx_cache.get_lrouters(
                changed_only=not scan_missing)
            lp_uuids = self._nsx_cache.get_lswitchports(
                changed_only=not scan_missing)
        LOG.debug("Time elapsed hashing data: %s",
                  timeutils.utcnow() - start)
        # Get an admin context
        ctx = context.get_admin_context()
        # Synchronize with database
        self._synchronize_lswitches(ctx, ls_uuids,
                                    scan_missing=scan_missing)
        self._synchronize_lrouters(ctx, lr_uuids,
                                   scan_missing=scan_missing)
        self._synchronize_lswitchports(ctx, lp_uuids,
                                       scan_missing=scan_missing)
        # Increase chunk counter
        LOG.info(_LI("Synchronization for chunk %(chunk_num)d of "
                     "%(total_chunks)d performed"),
                 {'chunk_num': sp.current_chunk + 1,
                  'total_chunks': num_chunks})
        sp.current_chunk = (sp.current_chunk + 1) % num_chunks
        added_delay = 0
        if sp.current_chunk == 0:
            # Ensure init_sync_performed is True
            if not sp.init_sync_performed:
                sp.init_sync_performed = True
            # Add additional random delay
            added_delay = random.randint(0, self._max_rand_delay)
        LOG.debug("Time elapsed at end of sync: %s",
                  timeutils.utcnow() - start)
        return self._sync_interval / num_chunks + added_delay
|
@ -1,67 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import hashlib
|
||||
|
||||
from neutron.api.v2 import attributes
|
||||
from neutron.openstack.common import log
|
||||
from neutron import version
|
||||
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
MAX_DISPLAY_NAME_LEN = 40
|
||||
NEUTRON_VERSION = version.version_info.release_string()
|
||||
|
||||
|
||||
# Allowed network types for the NSX Plugin
|
||||
class NetworkTypes(object):
    """Allowed provider network types for the NSX Plugin."""
    # External (layer-3) network
    L3_EXT = 'l3_ext'
    # Overlay tunnelling types
    STT = 'stt'
    GRE = 'gre'
    # Traditional segmentation types
    FLAT = 'flat'
    VLAN = 'vlan'
    BRIDGE = 'bridge'
|
||||
|
||||
|
||||
def get_tags(**kwargs):
    """Build the NSX tag list for a resource.

    Each keyword argument becomes a ``{'tag': value, 'scope': key}``
    entry; a tag carrying the Neutron release string (scope ``quantum``)
    is always appended.

    :returns: sorted list of tag dictionaries
    """
    # NOTE: iteritems() was Python-2-only; items() yields the same pairs
    # on Python 2 and also works on Python 3.
    tags = ([dict(tag=value, scope=key)
             for key, value in kwargs.items()])
    tags.append({"tag": NEUTRON_VERSION, "scope": "quantum"})
    # NOTE(review): sorting dicts relies on Python 2 ordering semantics
    # and would raise TypeError on Python 3 - a key function would be
    # needed there; left unchanged to preserve existing ordering.
    return sorted(tags)
|
||||
|
||||
|
||||
def device_id_to_vm_id(device_id, obfuscate=False):
    """Return an NSX-compatible VM identifier for a Neutron device_id.

    Identifiers short enough to fit in an NSX tag are passed through
    unchanged; longer ones (e.g. DHCP port device ids) are replaced by
    their SHA-1 digest. Hashing can also be forced via *obfuscate*.
    """
    # device_id can be longer than 40 characters, for example
    # a device_id for a dhcp port is like the following:
    #
    # dhcp83b5fdeb-e3b4-5e18-ac5f-55161...80747326-47d7-46c2-a87a-cf6d5194877c
    #
    # To fit it into an NSX tag we need to hash it, however device_id
    # used for ports associated to VM's are small enough so let's skip the
    # hashing
    needs_hashing = obfuscate or len(device_id) > MAX_DISPLAY_NAME_LEN
    if not needs_hashing:
        return device_id
    return hashlib.sha1(device_id).hexdigest()
|
||||
|
||||
|
||||
def check_and_truncate(display_name):
    """Clamp a display name to the maximum length accepted by NSX.

    Unset names are normalized to the empty string; names longer than
    MAX_DISPLAY_NAME_LEN are truncated (with a debug log).
    """
    if not attributes.is_attr_set(display_name):
        return display_name or ''
    if len(display_name) <= MAX_DISPLAY_NAME_LEN:
        return display_name or ''
    LOG.debug("Specified name:'%s' exceeds maximum length. "
              "It will be truncated on NSX", display_name)
    return display_name[:MAX_DISPLAY_NAME_LEN]
|
@ -1,198 +0,0 @@
|
||||
# Copyright 2012 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_db import exception as db_exc
|
||||
from oslo_utils import excutils
|
||||
from sqlalchemy.orm import exc
|
||||
|
||||
import neutron.db.api as db
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.dbexts import nsx_models
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_network_bindings(session, network_id):
    """Return all transport-zone bindings for the given network."""
    # Fall back to a fresh session when the caller did not supply one.
    active_session = session or db.get_session()
    query = active_session.query(nsx_models.TzNetworkBinding)
    return query.filter_by(network_id=network_id).all()
|
||||
|
||||
|
||||
def get_network_bindings_by_vlanid_and_physical_net(session, vlan_id,
                                                    phy_uuid):
    """Return bindings matching a VLAN id on a given physical network."""
    # Fall back to a fresh session when the caller did not supply one.
    active_session = session or db.get_session()
    query = active_session.query(nsx_models.TzNetworkBinding)
    return query.filter_by(vlan_id=vlan_id, phy_uuid=phy_uuid).all()
|
||||
|
||||
|
||||
def delete_network_bindings(session, network_id):
    """Delete all transport-zone bindings of a network.

    :returns: the number of rows deleted
    """
    query = session.query(nsx_models.TzNetworkBinding)
    return query.filter_by(network_id=network_id).delete()
|
||||
|
||||
|
||||
def add_network_binding(session, network_id, binding_type, phy_uuid, vlan_id):
    """Persist a transport-zone binding for a network.

    :returns: the newly created TzNetworkBinding record
    """
    binding = nsx_models.TzNetworkBinding(network_id, binding_type,
                                          phy_uuid, vlan_id)
    with session.begin(subtransactions=True):
        session.add(binding)
    return binding
|
||||
|
||||
|
||||
def add_neutron_nsx_network_mapping(session, neutron_id, nsx_switch_id):
    """Record the mapping between a Neutron network and an NSX switch.

    :returns: the newly created NeutronNsxNetworkMapping record
    """
    mapping = nsx_models.NeutronNsxNetworkMapping(
        neutron_id=neutron_id, nsx_id=nsx_switch_id)
    with session.begin(subtransactions=True):
        session.add(mapping)
    return mapping
|
||||
|
||||
|
||||
def add_neutron_nsx_port_mapping(session, neutron_id,
                                 nsx_switch_id, nsx_port_id):
    """Record the mapping between a Neutron port and an NSX switch port.

    Uses an explicit begin/commit (rather than a context manager) so the
    duplicate-entry handler below can inspect the existing row after a
    rollback and decide whether to swallow the error.

    :param session: a valid database session object
    :param neutron_id: the Neutron port identifier
    :param nsx_switch_id: the NSX logical switch identifier
    :param nsx_port_id: the NSX logical switch port identifier
    :returns: the created (or pre-existing, on benign duplicate) mapping
    :raises: DBDuplicateEntry if a conflicting mapping to a different
        backend port already exists; other DBError on failure
    """
    session.begin(subtransactions=True)
    try:
        mapping = nsx_models.NeutronNsxPortMapping(
            neutron_id, nsx_switch_id, nsx_port_id)
        session.add(mapping)
        session.commit()
    except db_exc.DBDuplicateEntry:
        with excutils.save_and_reraise_exception() as ctxt:
            session.rollback()
            # do not complain if the same exact mapping is being added,
            # otherwise re-raise because even though it is possible for the
            # same neutron port to map to different back-end ports over time,
            # this should not occur whilst a mapping already exists
            current = get_nsx_switch_and_port_id(session, neutron_id)
            if current[1] == nsx_port_id:
                LOG.debug("Port mapping for %s already available",
                          neutron_id)
                ctxt.reraise = False
    except db_exc.DBError:
        with excutils.save_and_reraise_exception():
            # rollback for any other db error
            session.rollback()
    return mapping
|
||||
|
||||
|
||||
def add_neutron_nsx_router_mapping(session, neutron_id, nsx_router_id):
    """Record the mapping between a Neutron router and an NSX router.

    :returns: the newly created NeutronNsxRouterMapping record
    """
    mapping = nsx_models.NeutronNsxRouterMapping(
        neutron_id=neutron_id, nsx_id=nsx_router_id)
    with session.begin(subtransactions=True):
        session.add(mapping)
    return mapping
|
||||
|
||||
|
||||
def add_neutron_nsx_security_group_mapping(session, neutron_id, nsx_id):
    """Map a Neutron security group to a NSX security profile.

    :param session: a valid database session object
    :param neutron_id: a neutron security group identifier
    :param nsx_id: a nsx security profile identifier
    :returns: the newly created NeutronNsxSecurityGroupMapping record
    """
    mapping = nsx_models.NeutronNsxSecurityGroupMapping(
        neutron_id=neutron_id, nsx_id=nsx_id)
    with session.begin(subtransactions=True):
        session.add(mapping)
    return mapping
|
||||
|
||||
|
||||
def get_nsx_switch_ids(session, neutron_id):
    """Return the NSX switch ids mapped to a Neutron network.

    A list is returned because a single Neutron network may be backed by
    several chained logical switches.
    """
    mappings = session.query(
        nsx_models.NeutronNsxNetworkMapping).filter_by(
            neutron_id=neutron_id)
    return [row['nsx_id'] for row in mappings]
|
||||
|
||||
|
||||
def get_nsx_switch_and_port_id(session, neutron_id):
    """Return the (switch id, port id) pair mapped to a Neutron port.

    :returns: ``(None, None)`` when no mapping has been stored yet
    """
    query = session.query(nsx_models.NeutronNsxPortMapping)
    try:
        row = query.filter_by(neutron_id=neutron_id).one()
    except exc.NoResultFound:
        LOG.debug("NSX identifiers for neutron port %s not yet "
                  "stored in Neutron DB", neutron_id)
        return None, None
    return row['nsx_switch_id'], row['nsx_port_id']
|
||||
|
||||
|
||||
def get_nsx_router_id(session, neutron_id):
    """Return the NSX router id mapped to a Neutron router.

    :returns: the NSX identifier, or None when no mapping is stored yet
    """
    try:
        mapping = (session.query(nsx_models.NeutronNsxRouterMapping).
                   filter_by(neutron_id=neutron_id).one())
        return mapping['nsx_id']
    except exc.NoResultFound:
        LOG.debug("NSX identifiers for neutron router %s not yet "
                  "stored in Neutron DB", neutron_id)
        # Explicit None for the not-found case, consistent with
        # get_nsx_security_group_id (previously fell off the end).
        return None
|
||||
|
||||
def get_nsx_security_group_id(session, neutron_id):
    """Return the id of a security group in the NSX backend.

    Note: security groups are called 'security profiles' in NSX.

    :returns: the NSX profile id, or None when no mapping is stored yet
    """
    query = session.query(nsx_models.NeutronNsxSecurityGroupMapping)
    try:
        row = query.filter_by(neutron_id=neutron_id).one()
    except exc.NoResultFound:
        LOG.debug("NSX identifiers for neutron security group %s not yet "
                  "stored in Neutron DB", neutron_id)
        return None
    return row['nsx_id']
|
||||
|
||||
|
||||
def _delete_by_neutron_id(session, model, neutron_id):
    """Delete every row of *model* keyed by the given Neutron id.

    :returns: the number of rows deleted
    """
    query = session.query(model).filter_by(neutron_id=neutron_id)
    return query.delete()
|
||||
|
||||
|
||||
def delete_neutron_nsx_port_mapping(session, neutron_id):
    """Remove the NSX port mapping stored for a Neutron port."""
    model = nsx_models.NeutronNsxPortMapping
    return _delete_by_neutron_id(session, model, neutron_id)
|
||||
|
||||
|
||||
def delete_neutron_nsx_router_mapping(session, neutron_id):
    """Remove the NSX router mapping stored for a Neutron router."""
    model = nsx_models.NeutronNsxRouterMapping
    return _delete_by_neutron_id(session, model, neutron_id)
|
||||
|
||||
|
||||
def unset_default_network_gateways(session):
    """Clear the 'default' flag on every network gateway."""
    with session.begin(subtransactions=True):
        gateways = session.query(nsx_models.NetworkGateway)
        gateways.update({nsx_models.NetworkGateway.default: False})
|
||||
|
||||
|
||||
def set_default_network_gateway(session, gw_id):
    """Flag the gateway identified by *gw_id* as the default one.

    :raises: NoResultFound if no gateway with that id exists
    """
    with session.begin(subtransactions=True):
        query = session.query(nsx_models.NetworkGateway)
        gateway = query.filter_by(id=gw_id).one()
        gateway['default'] = True
|
||||
|
||||
|
||||
def set_multiprovider_network(session, network_id):
    """Mark a network as a multi-provider network.

    :returns: the newly created MultiProviderNetworks record
    """
    record = nsx_models.MultiProviderNetworks(network_id)
    with session.begin(subtransactions=True):
        session.add(record)
    return record
|
||||
|
||||
|
||||
def is_multiprovider_network(session, network_id):
    """Return True when the network is marked as multi-provider."""
    with session.begin(subtransactions=True):
        query = session.query(nsx_models.MultiProviderNetworks)
        record = query.filter_by(network_id=network_id).first()
        return bool(record)
|
@ -1,101 +0,0 @@
|
||||
# Copyright 2014 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from oslo_db import exception as d_exc
|
||||
from sqlalchemy import orm
|
||||
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.common import exceptions as p_exc
|
||||
from neutron.plugins.vmware.dbexts import nsx_models
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def lsn_add(context, network_id, lsn_id):
    """Add Logical Service Node information to persistent datastore."""
    record = nsx_models.Lsn(network_id, lsn_id)
    with context.session.begin(subtransactions=True):
        context.session.add(record)
|
||||
|
||||
|
||||
def lsn_remove(context, lsn_id):
    """Remove Logical Service Node information from datastore given its id."""
    with context.session.begin(subtransactions=True):
        query = context.session.query(nsx_models.Lsn)
        query.filter_by(lsn_id=lsn_id).delete()
|
||||
|
||||
|
||||
def lsn_remove_for_network(context, network_id):
    """Remove information about the Logical Service Node given its network."""
    with context.session.begin(subtransactions=True):
        query = context.session.query(nsx_models.Lsn)
        query.filter_by(net_id=network_id).delete()
|
||||
|
||||
|
||||
def lsn_get_for_network(context, network_id, raise_on_err=True):
    """Retrieve LSN information given its network id.

    :param raise_on_err: when True (default), raise LsnNotFound if no LSN
        record exists for the network; when False, log a warning and
        return None instead.
    :returns: the Lsn record, or None when missing and raise_on_err is
        False
    """
    query = context.session.query(nsx_models.Lsn)
    try:
        return query.filter_by(net_id=network_id).one()
    # NOTE(review): DBError is treated the same as "no result", which may
    # mask genuine database failures - preserved as-is.
    except (orm.exc.NoResultFound, d_exc.DBError):
        msg = _('Unable to find Logical Service Node for network %s')
        if raise_on_err:
            LOG.error(msg, network_id)
            raise p_exc.LsnNotFound(entity='network',
                                    entity_id=network_id)
        else:
            LOG.warn(msg, network_id)
            # Explicit None (previously fell off the end of the function).
            return None
|
||||
|
||||
|
||||
def lsn_port_add_for_lsn(context, lsn_port_id, subnet_id, mac, lsn_id):
    """Add Logical Service Node Port information to persistent datastore."""
    record = nsx_models.LsnPort(lsn_port_id, subnet_id, mac, lsn_id)
    with context.session.begin(subtransactions=True):
        context.session.add(record)
|
||||
|
||||
|
||||
def lsn_port_get_for_subnet(context, subnet_id, raise_on_err=True):
    """Return Logical Service Node Port information given its subnet id.

    Returns None when no port exists and raise_on_err is False.
    """
    with context.session.begin(subtransactions=True):
        query = context.session.query(nsx_models.LsnPort)
        try:
            return query.filter_by(sub_id=subnet_id).one()
        except (orm.exc.NoResultFound, d_exc.DBError):
            if raise_on_err:
                raise p_exc.LsnPortNotFound(lsn_id=None,
                                            entity='subnet',
                                            entity_id=subnet_id)
|
||||
|
||||
|
||||
def lsn_port_get_for_mac(context, mac_address, raise_on_err=True):
    """Return Logical Service Node Port information given its mac address.

    Returns None when no port exists and raise_on_err is False.
    """
    with context.session.begin(subtransactions=True):
        query = context.session.query(nsx_models.LsnPort)
        try:
            return query.filter_by(mac_addr=mac_address).one()
        except (orm.exc.NoResultFound, d_exc.DBError):
            if raise_on_err:
                raise p_exc.LsnPortNotFound(lsn_id=None,
                                            entity='mac',
                                            entity_id=mac_address)
|
||||
|
||||
|
||||
def lsn_port_remove(context, lsn_port_id):
|
||||
"""Remove Logical Service Node port from the given Logical Service Node."""
|
||||
with context.session.begin(subtransactions=True):
|
||||
(context.session.query(nsx_models.LsnPort).
|
||||
filter_by(lsn_port_id=lsn_port_id).delete())
|
@ -1,62 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from sqlalchemy.orm import exc
|
||||
|
||||
from neutron.api.v2 import attributes
|
||||
from neutron.db import db_base_plugin_v2
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.dbexts import nsx_models
|
||||
from neutron.plugins.vmware.extensions import maclearning as mac
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MacLearningDbMixin(object):
|
||||
"""Mixin class for mac learning."""
|
||||
|
||||
def _make_mac_learning_state_dict(self, port, fields=None):
|
||||
res = {'port_id': port['port_id'],
|
||||
mac.MAC_LEARNING: port[mac.MAC_LEARNING]}
|
||||
return self._fields(res, fields)
|
||||
|
||||
def _extend_port_mac_learning_state(self, port_res, port_db):
|
||||
state = port_db.mac_learning_state
|
||||
if state and state.mac_learning_enabled:
|
||||
port_res[mac.MAC_LEARNING] = state.mac_learning_enabled
|
||||
|
||||
# Register dict extend functions for ports
|
||||
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
|
||||
attributes.PORTS, ['_extend_port_mac_learning_state'])
|
||||
|
||||
def _update_mac_learning_state(self, context, port_id, enabled):
|
||||
try:
|
||||
query = self._model_query(context, nsx_models.MacLearningState)
|
||||
state = query.filter(
|
||||
nsx_models.MacLearningState.port_id == port_id).one()
|
||||
state.update({mac.MAC_LEARNING: enabled})
|
||||
except exc.NoResultFound:
|
||||
self._create_mac_learning_state(context,
|
||||
{'id': port_id,
|
||||
mac.MAC_LEARNING: enabled})
|
||||
|
||||
def _create_mac_learning_state(self, context, port):
|
||||
with context.session.begin(subtransactions=True):
|
||||
enabled = port[mac.MAC_LEARNING]
|
||||
state = nsx_models.MacLearningState(
|
||||
port_id=port['id'],
|
||||
mac_learning_enabled=enabled)
|
||||
context.session.add(state)
|
||||
return self._make_mac_learning_state_dict(state)
|
@ -1,461 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from sqlalchemy.orm import exc as sa_orm_exc
|
||||
|
||||
from neutron.api.v2 import attributes
|
||||
from neutron.common import exceptions
|
||||
from neutron.common import utils
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.openstack.common import uuidutils
|
||||
from neutron.plugins.vmware.dbexts import nsx_models
|
||||
from neutron.plugins.vmware.extensions import networkgw
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
DEVICE_OWNER_NET_GW_INTF = 'network:gateway-interface'
|
||||
NETWORK_ID = 'network_id'
|
||||
SEGMENTATION_TYPE = 'segmentation_type'
|
||||
SEGMENTATION_ID = 'segmentation_id'
|
||||
ALLOWED_CONNECTION_ATTRIBUTES = set((NETWORK_ID,
|
||||
SEGMENTATION_TYPE,
|
||||
SEGMENTATION_ID))
|
||||
# Constants for gateway device operational status
|
||||
STATUS_UNKNOWN = "UNKNOWN"
|
||||
STATUS_ERROR = "ERROR"
|
||||
STATUS_ACTIVE = "ACTIVE"
|
||||
STATUS_DOWN = "DOWN"
|
||||
|
||||
|
||||
class GatewayInUse(exceptions.InUse):
|
||||
message = _("Network Gateway '%(gateway_id)s' still has active mappings "
|
||||
"with one or more neutron networks.")
|
||||
|
||||
|
||||
class GatewayNotFound(exceptions.NotFound):
|
||||
message = _("Network Gateway %(gateway_id)s could not be found")
|
||||
|
||||
|
||||
class GatewayDeviceInUse(exceptions.InUse):
|
||||
message = _("Network Gateway Device '%(device_id)s' is still used by "
|
||||
"one or more network gateways.")
|
||||
|
||||
|
||||
class GatewayDeviceNotFound(exceptions.NotFound):
|
||||
message = _("Network Gateway Device %(device_id)s could not be found.")
|
||||
|
||||
|
||||
class GatewayDevicesNotFound(exceptions.NotFound):
|
||||
message = _("One or more Network Gateway Devices could not be found: "
|
||||
"%(device_ids)s.")
|
||||
|
||||
|
||||
class NetworkGatewayPortInUse(exceptions.InUse):
|
||||
message = _("Port '%(port_id)s' is owned by '%(device_owner)s' and "
|
||||
"therefore cannot be deleted directly via the port API.")
|
||||
|
||||
|
||||
class GatewayConnectionInUse(exceptions.InUse):
|
||||
message = _("The specified mapping '%(mapping)s' is already in use on "
|
||||
"network gateway '%(gateway_id)s'.")
|
||||
|
||||
|
||||
class MultipleGatewayConnections(exceptions.Conflict):
|
||||
message = _("Multiple network connections found on '%(gateway_id)s' "
|
||||
"with provided criteria.")
|
||||
|
||||
|
||||
class GatewayConnectionNotFound(exceptions.NotFound):
|
||||
message = _("The connection %(network_mapping_info)s was not found on the "
|
||||
"network gateway '%(network_gateway_id)s'")
|
||||
|
||||
|
||||
class NetworkGatewayUnchangeable(exceptions.InUse):
|
||||
message = _("The network gateway %(gateway_id)s "
|
||||
"cannot be updated or deleted")
|
||||
|
||||
|
||||
class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
|
||||
|
||||
gateway_resource = networkgw.GATEWAY_RESOURCE_NAME
|
||||
device_resource = networkgw.DEVICE_RESOURCE_NAME
|
||||
|
||||
def _get_network_gateway(self, context, gw_id):
|
||||
try:
|
||||
gw = self._get_by_id(context, nsx_models.NetworkGateway, gw_id)
|
||||
except sa_orm_exc.NoResultFound:
|
||||
raise GatewayNotFound(gateway_id=gw_id)
|
||||
return gw
|
||||
|
||||
def _make_gw_connection_dict(self, gw_conn):
|
||||
return {'port_id': gw_conn['port_id'],
|
||||
'segmentation_type': gw_conn['segmentation_type'],
|
||||
'segmentation_id': gw_conn['segmentation_id']}
|
||||
|
||||
def _make_network_gateway_dict(self, network_gateway, fields=None):
|
||||
device_list = []
|
||||
for d in network_gateway['devices']:
|
||||
device_list.append({'id': d['id'],
|
||||
'interface_name': d['interface_name']})
|
||||
res = {'id': network_gateway['id'],
|
||||
'name': network_gateway['name'],
|
||||
'default': network_gateway['default'],
|
||||
'devices': device_list,
|
||||
'tenant_id': network_gateway['tenant_id']}
|
||||
# Query gateway connections only if needed
|
||||
if not fields or 'ports' in fields:
|
||||
res['ports'] = [self._make_gw_connection_dict(conn)
|
||||
for conn in network_gateway.network_connections]
|
||||
return self._fields(res, fields)
|
||||
|
||||
def _set_mapping_info_defaults(self, mapping_info):
|
||||
if not mapping_info.get('segmentation_type'):
|
||||
mapping_info['segmentation_type'] = 'flat'
|
||||
if not mapping_info.get('segmentation_id'):
|
||||
mapping_info['segmentation_id'] = 0
|
||||
|
||||
def _validate_network_mapping_info(self, network_mapping_info):
|
||||
self._set_mapping_info_defaults(network_mapping_info)
|
||||
network_id = network_mapping_info.get(NETWORK_ID)
|
||||
if not network_id:
|
||||
raise exceptions.InvalidInput(
|
||||
error_message=_("A network identifier must be specified "
|
||||
"when connecting a network to a network "
|
||||
"gateway. Unable to complete operation"))
|
||||
connection_attrs = set(network_mapping_info.keys())
|
||||
if not connection_attrs.issubset(ALLOWED_CONNECTION_ATTRIBUTES):
|
||||
raise exceptions.InvalidInput(
|
||||
error_message=(_("Invalid keys found among the ones provided "
|
||||
"in request body: %(connection_attrs)s."),
|
||||
connection_attrs))
|
||||
seg_type = network_mapping_info.get(SEGMENTATION_TYPE)
|
||||
seg_id = network_mapping_info.get(SEGMENTATION_ID)
|
||||
# The NSX plugin accepts 0 as a valid vlan tag
|
||||
seg_id_valid = seg_id == 0 or utils.is_valid_vlan_tag(seg_id)
|
||||
if seg_type.lower() == 'flat' and seg_id:
|
||||
msg = _("Cannot specify a segmentation id when "
|
||||
"the segmentation type is flat")
|
||||
raise exceptions.InvalidInput(error_message=msg)
|
||||
elif (seg_type.lower() == 'vlan' and not seg_id_valid):
|
||||
msg = _("Invalid segmentation id (%d) for "
|
||||
"vlan segmentation type") % seg_id
|
||||
raise exceptions.InvalidInput(error_message=msg)
|
||||
return network_id
|
||||
|
||||
def _retrieve_gateway_connections(self, context, gateway_id,
|
||||
mapping_info={}, only_one=False):
|
||||
filters = {'network_gateway_id': [gateway_id]}
|
||||
for k, v in mapping_info.iteritems():
|
||||
if v and k != NETWORK_ID:
|
||||
filters[k] = [v]
|
||||
query = self._get_collection_query(context,
|
||||
nsx_models.NetworkConnection,
|
||||
filters)
|
||||
return query.one() if only_one else query.all()
|
||||
|
||||
def _unset_default_network_gateways(self, context):
|
||||
with context.session.begin(subtransactions=True):
|
||||
context.session.query(nsx_models.NetworkGateway).update(
|
||||
{nsx_models.NetworkGateway.default: False})
|
||||
|
||||
def _set_default_network_gateway(self, context, gw_id):
|
||||
with context.session.begin(subtransactions=True):
|
||||
gw = (context.session.query(nsx_models.NetworkGateway).
|
||||
filter_by(id=gw_id).one())
|
||||
gw['default'] = True
|
||||
|
||||
def prevent_network_gateway_port_deletion(self, context, port):
|
||||
"""Pre-deletion check.
|
||||
|
||||
Ensures a port will not be deleted if is being used by a network
|
||||
gateway. In that case an exception will be raised.
|
||||
"""
|
||||
if port['device_owner'] == DEVICE_OWNER_NET_GW_INTF:
|
||||
raise NetworkGatewayPortInUse(port_id=port['id'],
|
||||
device_owner=port['device_owner'])
|
||||
|
||||
def _validate_device_list(self, context, tenant_id, gateway_data):
|
||||
device_query = self._query_gateway_devices(
|
||||
context, filters={'id': [device['id']
|
||||
for device in gateway_data['devices']]})
|
||||
retrieved_device_ids = set()
|
||||
for device in device_query:
|
||||
retrieved_device_ids.add(device['id'])
|
||||
if device['tenant_id'] != tenant_id:
|
||||
raise GatewayDeviceNotFound(device_id=device['id'])
|
||||
missing_device_ids = (
|
||||
set(device['id'] for device in gateway_data['devices']) -
|
||||
retrieved_device_ids)
|
||||
if missing_device_ids:
|
||||
raise GatewayDevicesNotFound(
|
||||
device_ids=",".join(missing_device_ids))
|
||||
|
||||
def create_network_gateway(self, context, network_gateway,
|
||||
validate_device_list=True):
|
||||
gw_data = network_gateway[self.gateway_resource]
|
||||
tenant_id = self._get_tenant_id_for_create(context, gw_data)
|
||||
with context.session.begin(subtransactions=True):
|
||||
gw_db = nsx_models.NetworkGateway(
|
||||
id=gw_data.get('id', uuidutils.generate_uuid()),
|
||||
tenant_id=tenant_id,
|
||||
name=gw_data.get('name'))
|
||||
# Device list is guaranteed to be a valid list, but some devices
|
||||
# might still either not exist or belong to a different tenant
|
||||
if validate_device_list:
|
||||
self._validate_device_list(context, tenant_id, gw_data)
|
||||
gw_db.devices.extend(
|
||||
[nsx_models.NetworkGatewayDeviceReference(**device)
|
||||
for device in gw_data['devices']])
|
||||
context.session.add(gw_db)
|
||||
LOG.debug("Created network gateway with id:%s", gw_db['id'])
|
||||
return self._make_network_gateway_dict(gw_db)
|
||||
|
||||
def update_network_gateway(self, context, id, network_gateway):
|
||||
gw_data = network_gateway[self.gateway_resource]
|
||||
with context.session.begin(subtransactions=True):
|
||||
gw_db = self._get_network_gateway(context, id)
|
||||
if gw_db.default:
|
||||
raise NetworkGatewayUnchangeable(gateway_id=id)
|
||||
# Ensure there is something to update before doing it
|
||||
if any([gw_db[k] != gw_data[k] for k in gw_data]):
|
||||
gw_db.update(gw_data)
|
||||
LOG.debug("Updated network gateway with id:%s", id)
|
||||
return self._make_network_gateway_dict(gw_db)
|
||||
|
||||
def get_network_gateway(self, context, id, fields=None):
|
||||
gw_db = self._get_network_gateway(context, id)
|
||||
return self._make_network_gateway_dict(gw_db, fields)
|
||||
|
||||
def delete_network_gateway(self, context, id):
|
||||
with context.session.begin(subtransactions=True):
|
||||
gw_db = self._get_network_gateway(context, id)
|
||||
if gw_db.network_connections:
|
||||
raise GatewayInUse(gateway_id=id)
|
||||
if gw_db.default:
|
||||
raise NetworkGatewayUnchangeable(gateway_id=id)
|
||||
context.session.delete(gw_db)
|
||||
LOG.debug("Network gateway '%s' was destroyed.", id)
|
||||
|
||||
def get_network_gateways(self, context, filters=None, fields=None,
|
||||
sorts=None, limit=None, marker=None,
|
||||
page_reverse=False):
|
||||
marker_obj = self._get_marker_obj(
|
||||
context, 'network_gateway', limit, marker)
|
||||
return self._get_collection(context, nsx_models.NetworkGateway,
|
||||
self._make_network_gateway_dict,
|
||||
filters=filters, fields=fields,
|
||||
sorts=sorts, limit=limit,
|
||||
marker_obj=marker_obj,
|
||||
page_reverse=page_reverse)
|
||||
|
||||
def connect_network(self, context, network_gateway_id,
|
||||
network_mapping_info):
|
||||
network_id = self._validate_network_mapping_info(network_mapping_info)
|
||||
LOG.debug("Connecting network '%(network_id)s' to gateway "
|
||||
"'%(network_gateway_id)s'",
|
||||
{'network_id': network_id,
|
||||
'network_gateway_id': network_gateway_id})
|
||||
with context.session.begin(subtransactions=True):
|
||||
gw_db = self._get_network_gateway(context, network_gateway_id)
|
||||
tenant_id = self._get_tenant_id_for_create(context, gw_db)
|
||||
# TODO(salvatore-orlando): Leverage unique constraint instead
|
||||
# of performing another query!
|
||||
if self._retrieve_gateway_connections(context,
|
||||
network_gateway_id,
|
||||
network_mapping_info):
|
||||
raise GatewayConnectionInUse(mapping=network_mapping_info,
|
||||
gateway_id=network_gateway_id)
|
||||
# TODO(salvatore-orlando): Creating a port will give it an IP,
|
||||
# but we actually do not need any. Instead of wasting an IP we
|
||||
# should have a way to say a port shall not be associated with
|
||||
# any subnet
|
||||
try:
|
||||
# We pass the segmentation type and id too - the plugin
|
||||
# might find them useful as the network connection object
|
||||
# does not exist yet.
|
||||
# NOTE: they're not extended attributes, rather extra data
|
||||
# passed in the port structure to the plugin
|
||||
# TODO(salvatore-orlando): Verify optimal solution for
|
||||
# ownership of the gateway port
|
||||
port = self.create_port(context, {
|
||||
'port':
|
||||
{'tenant_id': tenant_id,
|
||||
'network_id': network_id,
|
||||
'mac_address': attributes.ATTR_NOT_SPECIFIED,
|
||||
'admin_state_up': True,
|
||||
'fixed_ips': [],
|
||||
'device_id': network_gateway_id,
|
||||
'device_owner': DEVICE_OWNER_NET_GW_INTF,
|
||||
'name': '',
|
||||
'gw:segmentation_type':
|
||||
network_mapping_info.get('segmentation_type'),
|
||||
'gw:segmentation_id':
|
||||
network_mapping_info.get('segmentation_id')}})
|
||||
except exceptions.NetworkNotFound:
|
||||
err_msg = (_("Requested network '%(network_id)s' not found."
|
||||
"Unable to create network connection on "
|
||||
"gateway '%(network_gateway_id)s") %
|
||||
{'network_id': network_id,
|
||||
'network_gateway_id': network_gateway_id})
|
||||
LOG.error(err_msg)
|
||||
raise exceptions.InvalidInput(error_message=err_msg)
|
||||
port_id = port['id']
|
||||
LOG.debug("Gateway port for '%(network_gateway_id)s' "
|
||||
"created on network '%(network_id)s':%(port_id)s",
|
||||
{'network_gateway_id': network_gateway_id,
|
||||
'network_id': network_id,
|
||||
'port_id': port_id})
|
||||
# Create NetworkConnection record
|
||||
network_mapping_info['port_id'] = port_id
|
||||
network_mapping_info['tenant_id'] = tenant_id
|
||||
gw_db.network_connections.append(
|
||||
nsx_models.NetworkConnection(**network_mapping_info))
|
||||
port_id = port['id']
|
||||
# now deallocate and recycle ip from the port
|
||||
for fixed_ip in port.get('fixed_ips', []):
|
||||
self._delete_ip_allocation(context, network_id,
|
||||
fixed_ip['subnet_id'],
|
||||
fixed_ip['ip_address'])
|
||||
LOG.debug("Ensured no Ip addresses are configured on port %s",
|
||||
port_id)
|
||||
return {'connection_info':
|
||||
{'network_gateway_id': network_gateway_id,
|
||||
'network_id': network_id,
|
||||
'port_id': port_id}}
|
||||
|
||||
def disconnect_network(self, context, network_gateway_id,
|
||||
network_mapping_info):
|
||||
network_id = self._validate_network_mapping_info(network_mapping_info)
|
||||
LOG.debug("Disconnecting network '%(network_id)s' from gateway "
|
||||
"'%(network_gateway_id)s'",
|
||||
{'network_id': network_id,
|
||||
'network_gateway_id': network_gateway_id})
|
||||
with context.session.begin(subtransactions=True):
|
||||
# Uniquely identify connection, otherwise raise
|
||||
try:
|
||||
net_connection = self._retrieve_gateway_connections(
|
||||
context, network_gateway_id,
|
||||
network_mapping_info, only_one=True)
|
||||
except sa_orm_exc.NoResultFound:
|
||||
raise GatewayConnectionNotFound(
|
||||
network_mapping_info=network_mapping_info,
|
||||
network_gateway_id=network_gateway_id)
|
||||
except sa_orm_exc.MultipleResultsFound:
|
||||
raise MultipleGatewayConnections(
|
||||
gateway_id=network_gateway_id)
|
||||
# Remove gateway port from network
|
||||
# FIXME(salvatore-orlando): Ensure state of port in NSX is
|
||||
# consistent with outcome of transaction
|
||||
self.delete_port(context, net_connection['port_id'],
|
||||
nw_gw_port_check=False)
|
||||
# Remove NetworkConnection record
|
||||
context.session.delete(net_connection)
|
||||
|
||||
def _make_gateway_device_dict(self, gateway_device, fields=None,
|
||||
include_nsx_id=False):
|
||||
res = {'id': gateway_device['id'],
|
||||
'name': gateway_device['name'],
|
||||
'status': gateway_device['status'],
|
||||
'connector_type': gateway_device['connector_type'],
|
||||
'connector_ip': gateway_device['connector_ip'],
|
||||
'tenant_id': gateway_device['tenant_id']}
|
||||
if include_nsx_id:
|
||||
# Return the NSX mapping as well. This attribute will not be
|
||||
# returned in the API response anyway. Ensure it will not be
|
||||
# filtered out in field selection.
|
||||
if fields:
|
||||
fields.append('nsx_id')
|
||||
res['nsx_id'] = gateway_device['nsx_id']
|
||||
return self._fields(res, fields)
|
||||
|
||||
def _get_gateway_device(self, context, device_id):
|
||||
try:
|
||||
return self._get_by_id(context,
|
||||
nsx_models.NetworkGatewayDevice,
|
||||
device_id)
|
||||
except sa_orm_exc.NoResultFound:
|
||||
raise GatewayDeviceNotFound(device_id=device_id)
|
||||
|
||||
def _is_device_in_use(self, context, device_id):
|
||||
query = self._get_collection_query(
|
||||
context, nsx_models.NetworkGatewayDeviceReference,
|
||||
{'id': [device_id]})
|
||||
return query.first()
|
||||
|
||||
def get_gateway_device(self, context, device_id, fields=None,
|
||||
include_nsx_id=False):
|
||||
return self._make_gateway_device_dict(
|
||||
self._get_gateway_device(context, device_id),
|
||||
fields, include_nsx_id)
|
||||
|
||||
def _query_gateway_devices(self, context,
|
||||
filters=None, sorts=None,
|
||||
limit=None, marker=None,
|
||||
page_reverse=None):
|
||||
marker_obj = self._get_marker_obj(
|
||||
context, 'gateway_device', limit, marker)
|
||||
return self._get_collection_query(context,
|
||||
nsx_models.NetworkGatewayDevice,
|
||||
filters=filters,
|
||||
sorts=sorts,
|
||||
limit=limit,
|
||||
marker_obj=marker_obj,
|
||||
page_reverse=page_reverse)
|
||||
|
||||
def get_gateway_devices(self, context, filters=None, fields=None,
|
||||
sorts=None, limit=None, marker=None,
|
||||
page_reverse=False, include_nsx_id=False):
|
||||
query = self._query_gateway_devices(context, filters, sorts, limit,
|
||||
marker, page_reverse)
|
||||
return [self._make_gateway_device_dict(row, fields, include_nsx_id)
|
||||
for row in query]
|
||||
|
||||
def create_gateway_device(self, context, gateway_device,
|
||||
initial_status=STATUS_UNKNOWN):
|
||||
device_data = gateway_device[self.device_resource]
|
||||
tenant_id = self._get_tenant_id_for_create(context, device_data)
|
||||
with context.session.begin(subtransactions=True):
|
||||
device_db = nsx_models.NetworkGatewayDevice(
|
||||
id=device_data.get('id', uuidutils.generate_uuid()),
|
||||
tenant_id=tenant_id,
|
||||
name=device_data.get('name'),
|
||||
connector_type=device_data['connector_type'],
|
||||
connector_ip=device_data['connector_ip'],
|
||||
status=initial_status)
|
||||
context.session.add(device_db)
|
||||
LOG.debug("Created network gateway device: %s", device_db['id'])
|
||||
return self._make_gateway_device_dict(device_db)
|
||||
|
||||
def update_gateway_device(self, context, gateway_device_id,
|
||||
gateway_device, include_nsx_id=False):
|
||||
device_data = gateway_device[self.device_resource]
|
||||
with context.session.begin(subtransactions=True):
|
||||
device_db = self._get_gateway_device(context, gateway_device_id)
|
||||
# Ensure there is something to update before doing it
|
||||
if any([device_db[k] != device_data[k] for k in device_data]):
|
||||
device_db.update(device_data)
|
||||
LOG.debug("Updated network gateway device: %s",
|
||||
gateway_device_id)
|
||||
return self._make_gateway_device_dict(
|
||||
device_db, include_nsx_id=include_nsx_id)
|
||||
|
||||
def delete_gateway_device(self, context, device_id):
|
||||
with context.session.begin(subtransactions=True):
|
||||
# A gateway device should not be deleted
|
||||
# if it is used in any network gateway service
|
||||
if self._is_device_in_use(context, device_id):
|
||||
raise GatewayDeviceInUse(device_id=device_id)
|
||||
device_db = self._get_gateway_device(context, device_id)
|
||||
context.session.delete(device_db)
|
||||
LOG.debug("Deleted network gateway device: %s.", device_id)
|
@ -1,260 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from sqlalchemy.orm import exc
|
||||
|
||||
from neutron.api.v2 import attributes as attr
|
||||
from neutron.db import db_base_plugin_v2
|
||||
from neutron.db import models_v2
|
||||
from neutron.i18n import _LI
|
||||
from neutron.openstack.common import log
|
||||
from neutron.openstack.common import uuidutils
|
||||
from neutron.plugins.vmware.dbexts import nsx_models
|
||||
from neutron.plugins.vmware.extensions import qos
|
||||
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class QoSDbMixin(qos.QueuePluginBase):
|
||||
"""Mixin class to add queues."""
|
||||
|
||||
def create_qos_queue(self, context, qos_queue):
|
||||
q = qos_queue['qos_queue']
|
||||
with context.session.begin(subtransactions=True):
|
||||
qos_queue = nsx_models.QoSQueue(
|
||||
id=q.get('id', uuidutils.generate_uuid()),
|
||||
name=q.get('name'),
|
||||
tenant_id=q['tenant_id'],
|
||||
default=q.get('default'),
|
||||
min=q.get('min'),
|
||||
max=q.get('max'),
|
||||
qos_marking=q.get('qos_marking'),
|
||||
dscp=q.get('dscp'))
|
||||
context.session.add(qos_queue)
|
||||
return self._make_qos_queue_dict(qos_queue)
|
||||
|
||||
def get_qos_queue(self, context, queue_id, fields=None):
|
||||
return self._make_qos_queue_dict(
|
||||
self._get_qos_queue(context, queue_id), fields)
|
||||
|
||||
def _get_qos_queue(self, context, queue_id):
|
||||
try:
|
||||
return self._get_by_id(context, nsx_models.QoSQueue, queue_id)
|
||||
except exc.NoResultFound:
|
||||
raise qos.QueueNotFound(id=queue_id)
|
||||
|
||||
def get_qos_queues(self, context, filters=None, fields=None, sorts=None,
|
||||
limit=None, marker=None, page_reverse=False):
|
||||
marker_obj = self._get_marker_obj(context, 'qos_queue', limit, marker)
|
||||
return self._get_collection(context, nsx_models.QoSQueue,
|
||||
self._make_qos_queue_dict,
|
||||
filters=filters, fields=fields,
|
||||
sorts=sorts, limit=limit,
|
||||
marker_obj=marker_obj,
|
||||
page_reverse=page_reverse)
|
||||
|
||||
def delete_qos_queue(self, context, queue_id):
|
||||
qos_queue = self._get_qos_queue(context, queue_id)
|
||||
with context.session.begin(subtransactions=True):
|
||||
context.session.delete(qos_queue)
|
||||
|
||||
def _process_port_queue_mapping(self, context, port_data, queue_id):
|
||||
port_data[qos.QUEUE] = queue_id
|
||||
if not queue_id:
|
||||
return
|
||||
with context.session.begin(subtransactions=True):
|
||||
context.session.add(nsx_models.PortQueueMapping(
|
||||
port_id=port_data['id'],
|
||||
queue_id=queue_id))
|
||||
|
||||
def _get_port_queue_bindings(self, context, filters=None, fields=None):
|
||||
return self._get_collection(context, nsx_models.PortQueueMapping,
|
||||
self._make_port_queue_binding_dict,
|
||||
filters=filters, fields=fields)
|
||||
|
||||
def _delete_port_queue_mapping(self, context, port_id):
|
||||
query = self._model_query(context, nsx_models.PortQueueMapping)
|
||||
try:
|
||||
binding = query.filter(
|
||||
nsx_models.PortQueueMapping.port_id == port_id).one()
|
||||
except exc.NoResultFound:
|
||||
# return since this can happen if we are updating a port that
|
||||
# did not already have a queue on it. There is no need to check
|
||||
# if there is one before deleting if we return here.
|
||||
return
|
||||
with context.session.begin(subtransactions=True):
|
||||
context.session.delete(binding)
|
||||
|
||||
def _process_network_queue_mapping(self, context, net_data, queue_id):
|
||||
net_data[qos.QUEUE] = queue_id
|
||||
if not queue_id:
|
||||
return
|
||||
with context.session.begin(subtransactions=True):
|
||||
context.session.add(
|
||||
nsx_models.NetworkQueueMapping(network_id=net_data['id'],
|
||||
queue_id=queue_id))
|
||||
|
||||
def _get_network_queue_bindings(self, context, filters=None, fields=None):
|
||||
return self._get_collection(context, nsx_models.NetworkQueueMapping,
|
||||
self._make_network_queue_binding_dict,
|
||||
filters=filters, fields=fields)
|
||||
|
||||
def _delete_network_queue_mapping(self, context, network_id):
|
||||
query = self._model_query(context, nsx_models.NetworkQueueMapping)
|
||||
with context.session.begin(subtransactions=True):
|
||||
binding = query.filter_by(network_id=network_id).first()
|
||||
if binding:
|
||||
context.session.delete(binding)
|
||||
|
||||
def _extend_dict_qos_queue(self, obj_res, obj_db):
|
||||
queue_mapping = obj_db['qos_queue']
|
||||
if queue_mapping:
|
||||
obj_res[qos.QUEUE] = queue_mapping.get('queue_id')
|
||||
return obj_res
|
||||
|
||||
def _extend_port_dict_qos_queue(self, port_res, port_db):
|
||||
self._extend_dict_qos_queue(port_res, port_db)
|
||||
|
||||
def _extend_network_dict_qos_queue(self, network_res, network_db):
|
||||
self._extend_dict_qos_queue(network_res, network_db)
|
||||
|
||||
# Register dict extend functions for networks and ports
|
||||
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
|
||||
attr.NETWORKS, ['_extend_network_dict_qos_queue'])
|
||||
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
|
||||
attr.PORTS, ['_extend_port_dict_qos_queue'])
|
||||
|
||||
def _make_qos_queue_dict(self, queue, fields=None):
|
||||
res = {'id': queue['id'],
|
||||
'name': queue.get('name'),
|
||||
'default': queue.get('default'),
|
||||
'tenant_id': queue['tenant_id'],
|
||||
'min': queue.get('min'),
|
||||
'max': queue.get('max'),
|
||||
'qos_marking': queue.get('qos_marking'),
|
||||
'dscp': queue.get('dscp')}
|
||||
return self._fields(res, fields)
|
||||
|
||||
def _make_port_queue_binding_dict(self, queue, fields=None):
|
||||
res = {'port_id': queue['port_id'],
|
||||
'queue_id': queue['queue_id']}
|
||||
return self._fields(res, fields)
|
||||
|
||||
def _make_network_queue_binding_dict(self, queue, fields=None):
|
||||
res = {'network_id': queue['network_id'],
|
||||
'queue_id': queue['queue_id']}
|
||||
return self._fields(res, fields)
|
||||
|
||||
def _check_for_queue_and_create(self, context, port):
|
||||
"""Check for queue and create.
|
||||
|
||||
This function determines if a port should be associated with a
|
||||
queue. It works by first querying NetworkQueueMapping to determine
|
||||
if the network is associated with a queue. If so, then it queries
|
||||
NetworkQueueMapping for all the networks that are associated with
|
||||
this queue. Next, it queries against all the ports on these networks
|
||||
with the port device_id. Finally it queries PortQueueMapping. If that
|
||||
query returns a queue_id that is returned. Otherwise a queue is
|
||||
created that is the size of the queue associated with the network and
|
||||
that queue_id is returned.
|
||||
|
||||
If the network is not associated with a queue we then query to see
|
||||
if there is a default queue in the system. If so, a copy of that is
|
||||
created and the queue_id is returned.
|
||||
|
||||
Otherwise None is returned. None is also returned if the port does not
|
||||
have a device_id or if the device_owner is network:
|
||||
"""
|
||||
|
||||
queue_to_create = None
|
||||
# If there is no device_id don't create a queue. The queue will be
|
||||
# created on update port when the device_id is present. Also don't
|
||||
# apply QoS to network ports.
|
||||
if (not port.get('device_id') or
|
||||
port['device_owner'].startswith('network:')):
|
||||
return
|
||||
|
||||
# Check if there is a queue associated with the network
|
||||
filters = {'network_id': [port['network_id']]}
|
||||
network_queue_id = self._get_network_queue_bindings(
|
||||
context, filters, ['queue_id'])
|
||||
if network_queue_id:
|
||||
# get networks that queue is associated with
|
||||
filters = {'queue_id': [network_queue_id[0]['queue_id']]}
|
||||
networks_with_same_queue = self._get_network_queue_bindings(
|
||||
context, filters)
|
||||
|
||||
# get the ports on these networks with the same_queue and device_id
|
||||
filters = {'device_id': [port.get('device_id')],
|
||||
'network_id': [network['network_id'] for
|
||||
network in networks_with_same_queue]}
|
||||
query = self._model_query(context, models_v2.Port.id)
|
||||
query = self._apply_filters_to_query(query, models_v2.Port,
|
||||
filters)
|
||||
ports_ids = [p[0] for p in query]
|
||||
if ports_ids:
|
||||
# shared queue already exists find the queue id
|
||||
queues = self._get_port_queue_bindings(context,
|
||||
{'port_id': ports_ids},
|
||||
['queue_id'])
|
||||
if queues:
|
||||
return queues[0]['queue_id']
|
||||
|
||||
# get the size of the queue we want to create
|
||||
queue_to_create = self._get_qos_queue(
|
||||
context, network_queue_id[0]['queue_id'])
|
||||
|
||||
else:
|
||||
# check for default queue
|
||||
filters = {'default': [True]}
|
||||
# context is elevated since default queue is owned by admin
|
||||
queue_to_create = self.get_qos_queues(context.elevated(), filters)
|
||||
if not queue_to_create:
|
||||
return
|
||||
queue_to_create = queue_to_create[0]
|
||||
|
||||
# create the queue
|
||||
tenant_id = self._get_tenant_id_for_create(context, port)
|
||||
if port.get(qos.RXTX_FACTOR) and queue_to_create.get('max'):
|
||||
queue_to_create['max'] *= int(port[qos.RXTX_FACTOR])
|
||||
queue = {'qos_queue': {'name': queue_to_create.get('name'),
|
||||
'min': queue_to_create.get('min'),
|
||||
'max': queue_to_create.get('max'),
|
||||
'dscp': queue_to_create.get('dscp'),
|
||||
'qos_marking':
|
||||
queue_to_create.get('qos_marking'),
|
||||
'tenant_id': tenant_id}}
|
||||
return self.create_qos_queue(context, queue, False)['id']
|
||||
|
||||
def _validate_qos_queue(self, context, qos_queue):
    """Validate a qos queue dict prior to creation.

    Raises:
        qos.DefaultQueueAlreadyExists: an admin creates a second
            default queue.
        qos.DefaultQueueCreateNotAdmin: a non-admin tries to create
            the default queue.
        qos.QueueInvalidMarking: a non-zero DSCP is supplied together
            with 'trusted' marking.
        qos.QueueMinGreaterMax: min exceeds max (max may be None).

    NOTE: when marking is 'trusted' the 'dscp' key is popped from
    qos_queue so the ignored value is not persisted.
    """
    if qos_queue.get('default'):
        if context.is_admin:
            if self.get_qos_queues(context, filters={'default': [True]}):
                raise qos.DefaultQueueAlreadyExists()
        else:
            raise qos.DefaultQueueCreateNotAdmin()
    if qos_queue.get('qos_marking') == 'trusted':
        dscp = qos_queue.pop('dscp')
        if dscp:
            # must raise because a non-zero dscp was provided
            raise qos.QueueInvalidMarking()
        LOG.info(_LI("DSCP value (%s) will be ignored with 'trusted' "
                     "marking"), dscp)
    # Renamed from max/min to avoid shadowing the builtins.
    qmax = qos_queue.get('max')
    qmin = qos_queue.get('min')
    # Max can be None
    if qmax and qmin > qmax:
        raise qos.QueueMinGreaterMax()
|
@ -1,52 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.dbexts import vcns_models
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def add_vcns_router_binding(session, router_id, vse_id, lswitch_id, status):
    """Persist a new router/edge/lswitch binding and return it."""
    with session.begin(subtransactions=True):
        row = vcns_models.VcnsRouterBinding(router_id=router_id,
                                            edge_id=vse_id,
                                            lswitch_id=lswitch_id,
                                            status=status)
        session.add(row)
        return row
|
||||
|
||||
|
||||
def get_vcns_router_binding(session, router_id):
    """Return the binding for router_id, or None when absent."""
    with session.begin(subtransactions=True):
        query = session.query(vcns_models.VcnsRouterBinding)
        return query.filter_by(router_id=router_id).first()
|
||||
|
||||
|
||||
def update_vcns_router_binding(session, router_id, **kwargs):
    """Update fields of the binding for router_id.

    Raises (via Query.one()) if no binding exists for router_id.
    Returns the updated binding, for consistency with
    add_vcns_router_binding; callers that ignored the previous None
    return are unaffected.
    """
    with session.begin(subtransactions=True):
        binding = (session.query(vcns_models.VcnsRouterBinding).
                   filter_by(router_id=router_id).one())
        for key, value in kwargs.iteritems():
            binding[key] = value
        return binding
|
||||
|
||||
|
||||
def delete_vcns_router_binding(session, router_id):
    """Remove the binding for router_id; raises when none exists."""
    with session.begin(subtransactions=True):
        query = session.query(vcns_models.VcnsRouterBinding)
        session.delete(query.filter_by(router_id=router_id).one())
|
@ -1,95 +0,0 @@
|
||||
# Copyright 2014 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
|
||||
from neutron.common import constants as const
|
||||
from neutron.common import topics
|
||||
from neutron.plugins.vmware.dhcp_meta import nsx as nsx_svc
|
||||
from neutron.plugins.vmware.dhcp_meta import rpc as nsx_rpc
|
||||
|
||||
|
||||
class DhcpAgentNotifyAPI(dhcp_rpc_agent_api.DhcpAgentNotifyAPI):
    """Route DHCP notifications between agentless and agent-based modes.

    Networks backed by an LSN (the "new model") are served without a
    DHCP agent through the agentless notifier; other networks fall back
    to the standard agent-based notifier in the parent class.
    """

    def __init__(self, plugin, manager):
        super(DhcpAgentNotifyAPI, self).__init__(topic=topics.DHCP_AGENT)
        # Notifier used for LSN-backed (agentless) networks.
        self.agentless_notifier = nsx_svc.DhcpAgentNotifyAPI(plugin, manager)

    def notify(self, context, data, methodname):
        """Dispatch one '<resource>.<action>.<suffix>' event.

        Events for unknown resources are silently dropped. For
        LSN-backed networks only 'port.update' and subnet events reach
        the agentless notifier; non-LSN port/subnet events go to the
        agent-based parent notifier.
        """
        [resource, action, _e] = methodname.split('.')
        lsn_manager = self.agentless_notifier.plugin.lsn_manager
        plugin = self.agentless_notifier.plugin
        if resource == 'network':
            net_id = data['network']['id']
        elif resource in ['port', 'subnet']:
            net_id = data[resource]['network_id']
        else:
            # no valid resource
            return
        lsn_exists = lsn_manager.lsn_exists(context, net_id)
        treat_dhcp_owner_specially = False
        if lsn_exists:
            # if lsn exists, the network is one created with the new model
            if (resource == 'subnet' and action == 'create' and
                    const.DEVICE_OWNER_DHCP not in
                    plugin.port_special_owners):
                # network/subnet provisioned in the new model have a plain
                # nsx lswitch port, no vif attachment
                plugin.port_special_owners.append(const.DEVICE_OWNER_DHCP)
                treat_dhcp_owner_specially = True
            if (resource == 'port' and action == 'update' or
                    resource == 'subnet'):
                self.agentless_notifier.notify(context, data, methodname)
        elif not lsn_exists and resource in ['port', 'subnet']:
            # call notifier for the agent-based mode
            super(DhcpAgentNotifyAPI, self).notify(context, data, methodname)
        if treat_dhcp_owner_specially:
            # if subnets belong to networks created with the old model
            # dhcp port does not need to be special cased, so put things
            # back, since they were modified
            plugin.port_special_owners.remove(const.DEVICE_OWNER_DHCP)
|
||||
|
||||
|
||||
def handle_network_dhcp_access(plugin, context, network, action):
    """Forward network-level DHCP events to the agentless (LSN) backend."""
    nsx_svc.handle_network_dhcp_access(plugin, context, network, action)
|
||||
|
||||
|
||||
def handle_port_dhcp_access(plugin, context, port, action):
    """Dispatch port DHCP handling to the LSN or agent/RPC backend.

    The agentless (LSN) backend is chosen when the port's network is
    LSN-backed; otherwise the agent-based RPC backend handles it.
    """
    if plugin.lsn_manager.lsn_exists(context, port['network_id']):
        handler = nsx_svc.handle_port_dhcp_access
    else:
        handler = nsx_rpc.handle_port_dhcp_access
    handler(plugin, context, port, action)
|
||||
|
||||
|
||||
def handle_port_metadata_access(plugin, context, port, is_delete=False):
    """Dispatch port metadata handling to the LSN or agent/RPC backend."""
    lsn_backed = plugin.lsn_manager.lsn_exists(context, port['network_id'])
    backend = nsx_svc if lsn_backed else nsx_rpc
    backend.handle_port_metadata_access(plugin, context, port, is_delete)
|
||||
|
||||
|
||||
def handle_router_metadata_access(plugin, context, router_id, interface=None):
    """Dispatch router metadata handling to the LSN or agent/RPC backend.

    The agentless (LSN) backend is used only when an interface is given
    and its subnet's network is LSN-backed; in every other case the
    agent-based RPC backend is invoked.
    """
    handler = nsx_rpc.handle_router_metadata_access
    if interface:
        subnet = plugin.get_subnet(context, interface['subnet_id'])
        if plugin.lsn_manager.lsn_exists(context, subnet['network_id']):
            handler = nsx_svc.handle_router_metadata_access
    handler(plugin, context, router_id, interface)
|
@ -1,28 +0,0 @@
|
||||
# Copyright 2014 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
from neutron.common import constants as const
|
||||
from neutron.db import l3_db
|
||||
|
||||
# A unique MAC to quickly identify the LSN port used for metadata services
# when dhcp on the subnet is off. Inspired by leet-speak for 'metadata'.
METADATA_MAC = "fa:15:73:74:d4:74"
# Well-known neutron tag/name/device-id used to locate and label the
# lswitch port backing the metadata service on a network.
METADATA_PORT_ID = 'metadata:id'
METADATA_PORT_NAME = 'metadata:name'
METADATA_DEVICE_ID = 'metadata:device'
# Port device owners that get special treatment in DHCP/metadata handling.
SPECIAL_OWNERS = (const.DEVICE_OWNER_DHCP,
                  const.DEVICE_OWNER_ROUTER_GW,
                  l3_db.DEVICE_OWNER_ROUTER_INTF)
|
@ -1,477 +0,0 @@
|
||||
# Copyright 2014 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_db import exception as db_exc
|
||||
from oslo_utils import excutils
|
||||
|
||||
from neutron.common import exceptions as n_exc
|
||||
from neutron.i18n import _LE, _LW
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.api_client import exception as api_exc
|
||||
from neutron.plugins.vmware.common import exceptions as p_exc
|
||||
from neutron.plugins.vmware.common import nsx_utils
|
||||
from neutron.plugins.vmware.dbexts import lsn_db
|
||||
from neutron.plugins.vmware.dhcp_meta import constants as const
|
||||
from neutron.plugins.vmware.nsxlib import lsn as lsn_api
|
||||
from neutron.plugins.vmware.nsxlib import switch as switch_api
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# Names of the LSN service sections whose host entries can be updated
# (see LsnManager.lsn_port_update).
META_CONF = 'metadata-proxy'
DHCP_CONF = 'dhcp'


# Options registered under the [NSX_LSN] configuration group.
lsn_opts = [
    cfg.BoolOpt('sync_on_missing_data', default=False,
                help=_('Pull LSN information from NSX in case it is missing '
                       'from the local data store. This is useful to rebuild '
                       'the local store in case of server recovery.'))
]
|
||||
|
||||
|
||||
def register_lsn_opts(config):
    """Register the [NSX_LSN] options with the given config module."""
    config.CONF.register_opts(lsn_opts, "NSX_LSN")
|
||||
|
||||
|
||||
class LsnManager(object):
    """Manage LSN entities associated with networks.

    All NSX-side failures are translated into plugin exceptions
    (p_exc.*); read helpers with raise_on_err=False degrade to warnings
    and None results instead.
    """

    def __init__(self, plugin):
        self.plugin = plugin

    @property
    def cluster(self):
        # The NSX cluster handle is owned by the plugin; never cached here.
        return self.plugin.cluster

    def lsn_exists(self, context, network_id):
        """Return True if a Logical Service Node exists for the network."""
        return self.lsn_get(
            context, network_id, raise_on_err=False) is not None

    def lsn_get(self, context, network_id, raise_on_err=True):
        """Retrieve the LSN id associated to the network.

        Returns None (after a warning) when the LSN is missing and
        raise_on_err is False; otherwise raises p_exc.LsnNotFound.
        """
        try:
            return lsn_api.lsn_for_network_get(self.cluster, network_id)
        except (n_exc.NotFound, api_exc.NsxApiException):
            if raise_on_err:
                LOG.error(_LE('Unable to find Logical Service Node for '
                              'network %s.'),
                          network_id)
                raise p_exc.LsnNotFound(entity='network',
                                        entity_id=network_id)
            else:
                LOG.warn(_LW('Unable to find Logical Service Node for '
                             'the requested network %s.'),
                         network_id)

    def lsn_create(self, context, network_id):
        """Create a LSN associated to the network."""
        try:
            return lsn_api.lsn_for_network_create(self.cluster, network_id)
        except api_exc.NsxApiException:
            err_msg = _('Unable to create LSN for network %s') % network_id
            raise p_exc.NsxPluginException(err_msg=err_msg)

    def lsn_delete(self, context, lsn_id):
        """Delete a LSN given its id (best effort: failures are logged)."""
        try:
            lsn_api.lsn_delete(self.cluster, lsn_id)
        except (n_exc.NotFound, api_exc.NsxApiException):
            LOG.warn(_LW('Unable to delete Logical Service Node %s'), lsn_id)

    def lsn_delete_by_network(self, context, network_id):
        """Delete a LSN associated to the network."""
        lsn_id = self.lsn_get(context, network_id, raise_on_err=False)
        if lsn_id:
            self.lsn_delete(context, lsn_id)

    def lsn_port_get(self, context, network_id, subnet_id, raise_on_err=True):
        """Retrieve LSN and LSN port for the network and the subnet.

        Returns an (lsn_id, lsn_port_id) pair; either element may be
        None when the corresponding entity is missing and
        raise_on_err is False.
        """
        lsn_id = self.lsn_get(context, network_id, raise_on_err=raise_on_err)
        if lsn_id:
            try:
                lsn_port_id = lsn_api.lsn_port_by_subnet_get(
                    self.cluster, lsn_id, subnet_id)
            except (n_exc.NotFound, api_exc.NsxApiException):
                if raise_on_err:
                    LOG.error(_LE('Unable to find Logical Service Node Port '
                                  'for LSN %(lsn_id)s and subnet '
                                  '%(subnet_id)s'),
                              {'lsn_id': lsn_id, 'subnet_id': subnet_id})
                    raise p_exc.LsnPortNotFound(lsn_id=lsn_id,
                                                entity='subnet',
                                                entity_id=subnet_id)
                else:
                    LOG.warn(_LW('Unable to find Logical Service Node Port '
                                 'for LSN %(lsn_id)s and subnet '
                                 '%(subnet_id)s'),
                             {'lsn_id': lsn_id, 'subnet_id': subnet_id})
                    return (lsn_id, None)
            else:
                return (lsn_id, lsn_port_id)
        else:
            return (None, None)

    def lsn_port_get_by_mac(self, context, network_id, mac, raise_on_err=True):
        """Retrieve LSN and LSN port given network and mac address.

        Same return contract as lsn_port_get.
        """
        lsn_id = self.lsn_get(context, network_id, raise_on_err=raise_on_err)
        if lsn_id:
            try:
                lsn_port_id = lsn_api.lsn_port_by_mac_get(
                    self.cluster, lsn_id, mac)
            except (n_exc.NotFound, api_exc.NsxApiException):
                if raise_on_err:
                    LOG.error(_LE('Unable to find Logical Service Node Port '
                                  'for LSN %(lsn_id)s and mac address '
                                  '%(mac)s'),
                              {'lsn_id': lsn_id, 'mac': mac})
                    raise p_exc.LsnPortNotFound(lsn_id=lsn_id,
                                                entity='MAC',
                                                entity_id=mac)
                else:
                    LOG.warn(_LW('Unable to find Logical Service Node '
                                 'Port for LSN %(lsn_id)s and mac address '
                                 '%(mac)s'),
                             {'lsn_id': lsn_id, 'mac': mac})
                    return (lsn_id, None)
            else:
                return (lsn_id, lsn_port_id)
        else:
            return (None, None)

    def lsn_port_create(self, context, lsn_id, subnet_info):
        """Create and return LSN port for associated subnet."""
        try:
            return lsn_api.lsn_port_create(self.cluster, lsn_id, subnet_info)
        except n_exc.NotFound:
            raise p_exc.LsnNotFound(entity='', entity_id=lsn_id)
        except api_exc.NsxApiException:
            err_msg = _('Unable to create port for LSN %s') % lsn_id
            raise p_exc.NsxPluginException(err_msg=err_msg)

    def lsn_port_delete(self, context, lsn_id, lsn_port_id):
        """Delete a LSN port (best effort: failures are logged)."""
        try:
            lsn_api.lsn_port_delete(self.cluster, lsn_id, lsn_port_id)
        except (n_exc.NotFound, api_exc.NsxApiException):
            LOG.warn(_LW('Unable to delete LSN Port %s'), lsn_port_id)

    def lsn_port_dispose(self, context, network_id, mac_address):
        """Delete a LSN port given the network and the mac address.

        For the well-known metadata MAC, the companion lswitch port is
        removed as well.
        """
        lsn_id, lsn_port_id = self.lsn_port_get_by_mac(
            context, network_id, mac_address, raise_on_err=False)
        if lsn_port_id:
            self.lsn_port_delete(context, lsn_id, lsn_port_id)
            if mac_address == const.METADATA_MAC:
                try:
                    lswitch_port_id = switch_api.get_port_by_neutron_tag(
                        self.cluster, network_id,
                        const.METADATA_PORT_ID)['uuid']
                    switch_api.delete_port(
                        self.cluster, network_id, lswitch_port_id)
                except (n_exc.PortNotFoundOnNetwork,
                        api_exc.NsxApiException):
                    LOG.warn(_LW("Metadata port not found while attempting "
                                 "to delete it from network %s"), network_id)
        else:
            LOG.warn(_LW("Unable to find Logical Services Node "
                         "Port with MAC %s"), mac_address)

    def lsn_port_dhcp_setup(
            self, context, network_id, port_id, port_data, subnet_config=None):
        """Connect network to LSN via specified port and port_data.

        Returns (lsn_id, lsn_port_id) when no subnet_config is given;
        otherwise configures DHCP on the new port and returns None.
        """
        try:
            # lsn_id must pre-exist for the error path below.
            lsn_id = None
            switch_id = nsx_utils.get_nsx_switch_ids(
                context.session, self.cluster, network_id)[0]
            lswitch_port_id = switch_api.get_port_by_neutron_tag(
                self.cluster, switch_id, port_id)['uuid']
            lsn_id = self.lsn_get(context, network_id)
            lsn_port_id = self.lsn_port_create(context, lsn_id, port_data)
        except (n_exc.NotFound, p_exc.NsxPluginException):
            raise p_exc.PortConfigurationError(
                net_id=network_id, lsn_id=lsn_id, port_id=port_id)
        else:
            try:
                lsn_api.lsn_port_plug_network(
                    self.cluster, lsn_id, lsn_port_id, lswitch_port_id)
            except p_exc.LsnConfigurationConflict:
                self.lsn_port_delete(context, lsn_id, lsn_port_id)
                raise p_exc.PortConfigurationError(
                    net_id=network_id, lsn_id=lsn_id, port_id=port_id)
            if subnet_config:
                self.lsn_port_dhcp_configure(
                    context, lsn_id, lsn_port_id, subnet_config)
            else:
                return (lsn_id, lsn_port_id)

    def lsn_port_metadata_setup(self, context, lsn_id, subnet):
        """Connect subnet to specified LSN."""
        data = {
            "mac_address": const.METADATA_MAC,
            "ip_address": subnet['cidr'],
            "subnet_id": subnet['id']
        }
        network_id = subnet['network_id']
        tenant_id = subnet['tenant_id']
        lswitch_port_id = None
        try:
            switch_id = nsx_utils.get_nsx_switch_ids(
                context.session, self.cluster, network_id)[0]
            lswitch_port_id = switch_api.create_lport(
                self.cluster, switch_id, tenant_id,
                const.METADATA_PORT_ID, const.METADATA_PORT_NAME,
                const.METADATA_DEVICE_ID, True)['uuid']
            lsn_port_id = self.lsn_port_create(context, lsn_id, data)
        except (n_exc.NotFound, p_exc.NsxPluginException,
                api_exc.NsxApiException):
            raise p_exc.PortConfigurationError(
                net_id=network_id, lsn_id=lsn_id, port_id=lswitch_port_id)
        else:
            try:
                lsn_api.lsn_port_plug_network(
                    self.cluster, lsn_id, lsn_port_id, lswitch_port_id)
            except p_exc.LsnConfigurationConflict:
                # Fix: lsn_port_delete expects the request context as its
                # first argument; self.cluster was passed here (harmless
                # only because the argument is unused, but wrong).
                self.lsn_port_delete(context, lsn_id, lsn_port_id)
                switch_api.delete_port(
                    self.cluster, network_id, lswitch_port_id)
                raise p_exc.PortConfigurationError(
                    net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id)

    def lsn_port_dhcp_configure(self, context, lsn_id, lsn_port_id, subnet):
        """Enable/disable dhcp services with the given config options."""
        is_enabled = subnet["enable_dhcp"]
        dhcp_options = {
            "domain_name": cfg.CONF.NSX_DHCP.domain_name,
            "default_lease_time": cfg.CONF.NSX_DHCP.default_lease_time,
        }
        # Copy before extending: extending the option value in place
        # would leak this subnet's nameservers into every later call.
        dns_servers = list(
            cfg.CONF.NSX_DHCP.extra_domain_name_servers or [])
        dns_servers.extend(subnet["dns_nameservers"])
        if subnet['gateway_ip']:
            dhcp_options["routers"] = subnet["gateway_ip"]
        if dns_servers:
            dhcp_options["domain_name_servers"] = ",".join(dns_servers)
        if subnet["host_routes"]:
            dhcp_options["classless_static_routes"] = (
                ",".join(subnet["host_routes"])
            )
        try:
            lsn_api.lsn_port_dhcp_configure(
                self.cluster, lsn_id, lsn_port_id, is_enabled, dhcp_options)
        except (n_exc.NotFound, api_exc.NsxApiException):
            err_msg = (_('Unable to configure dhcp for Logical Service '
                         'Node %(lsn_id)s and port %(lsn_port_id)s')
                       % {'lsn_id': lsn_id, 'lsn_port_id': lsn_port_id})
            LOG.error(err_msg)
            raise p_exc.NsxPluginException(err_msg=err_msg)

    def lsn_metadata_configure(self, context, subnet_id, is_enabled):
        """Configure metadata service for the specified subnet."""
        subnet = self.plugin.get_subnet(context, subnet_id)
        network_id = subnet['network_id']
        meta_conf = cfg.CONF.NSX_METADATA
        metadata_options = {
            'metadata_server_ip': meta_conf.metadata_server_address,
            'metadata_server_port': meta_conf.metadata_server_port,
            'metadata_proxy_shared_secret': meta_conf.metadata_shared_secret
        }
        try:
            lsn_id = self.lsn_get(context, network_id)
            lsn_api.lsn_metadata_configure(
                self.cluster, lsn_id, is_enabled, metadata_options)
        except (p_exc.LsnNotFound, api_exc.NsxApiException):
            err_msg = (_('Unable to configure metadata '
                         'for subnet %s') % subnet_id)
            LOG.error(err_msg)
            raise p_exc.NsxPluginException(err_msg=err_msg)
        if is_enabled:
            try:
                # test that the lsn port exists
                self.lsn_port_get(context, network_id, subnet_id)
            except p_exc.LsnPortNotFound:
                # this might happen if subnet had dhcp off when created
                # so create one, and wire it
                self.lsn_port_metadata_setup(context, lsn_id, subnet)
        else:
            self.lsn_port_dispose(context, network_id, const.METADATA_MAC)

    def _lsn_port_host_conf(self, context, network_id, subnet_id, data, hdlr):
        # Resolve the LSN port and apply hdlr(cluster, lsn, port, data);
        # silently skips when either id is missing.
        lsn_id, lsn_port_id = self.lsn_port_get(
            context, network_id, subnet_id, raise_on_err=False)
        try:
            if lsn_id and lsn_port_id:
                hdlr(self.cluster, lsn_id, lsn_port_id, data)
        except (n_exc.NotFound, api_exc.NsxApiException):
            LOG.error(_LE('Error while configuring LSN '
                          'port %s'), lsn_port_id)
            raise p_exc.PortConfigurationError(
                net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id)

    def lsn_port_dhcp_host_add(self, context, network_id, subnet_id, host):
        """Add dhcp host entry to LSN port configuration."""
        self._lsn_port_host_conf(context, network_id, subnet_id, host,
                                 lsn_api.lsn_port_dhcp_host_add)

    def lsn_port_dhcp_host_remove(self, context, network_id, subnet_id, host):
        """Remove dhcp host entry from LSN port configuration."""
        self._lsn_port_host_conf(context, network_id, subnet_id, host,
                                 lsn_api.lsn_port_dhcp_host_remove)

    def lsn_port_meta_host_add(self, context, network_id, subnet_id, host):
        """Add metadata host entry to LSN port configuration."""
        self._lsn_port_host_conf(context, network_id, subnet_id, host,
                                 lsn_api.lsn_port_metadata_host_add)

    def lsn_port_meta_host_remove(self, context, network_id, subnet_id, host):
        """Remove metadata host entry from LSN port configuration."""
        self._lsn_port_host_conf(context, network_id, subnet_id, host,
                                 lsn_api.lsn_port_metadata_host_remove)

    def lsn_port_update(
            self, context, network_id, subnet_id, dhcp=None, meta=None):
        """Update the specified configuration for the LSN port."""
        if not dhcp and not meta:
            return
        try:
            lsn_id, lsn_port_id = self.lsn_port_get(
                context, network_id, subnet_id, raise_on_err=False)
            if dhcp and lsn_id and lsn_port_id:
                lsn_api.lsn_port_host_entries_update(
                    self.cluster, lsn_id, lsn_port_id, DHCP_CONF, dhcp)
            if meta and lsn_id and lsn_port_id:
                lsn_api.lsn_port_host_entries_update(
                    self.cluster, lsn_id, lsn_port_id, META_CONF, meta)
        except api_exc.NsxApiException:
            raise p_exc.PortConfigurationError(
                net_id=network_id, lsn_id=lsn_id, port_id=lsn_port_id)
|
||||
|
||||
|
||||
class PersistentLsnManager(LsnManager):
    """Add local persistent state to LSN Manager.

    Reads go to the local DB first; when sync_on_missing_data is on,
    DB misses fall back to NSX and the result is written back to the DB.
    Writes keep NSX and the DB consistent (DB failures roll back the
    NSX-side create).
    """

    def __init__(self, plugin):
        super(PersistentLsnManager, self).__init__(plugin)
        # When True, DB misses are resynchronized from NSX on the fly.
        self.sync_on_missing = cfg.CONF.NSX_LSN.sync_on_missing_data

    def lsn_get(self, context, network_id, raise_on_err=True):
        """Return the LSN id for the network from the DB, resyncing
        from NSX on a miss when sync_on_missing is enabled."""
        try:
            obj = lsn_db.lsn_get_for_network(
                context, network_id, raise_on_err=raise_on_err)
            return obj.lsn_id if obj else None
        except p_exc.LsnNotFound:
            # Suppress by default; re-raise only when the resync path
            # is not taken and the caller asked for errors.
            with excutils.save_and_reraise_exception() as ctxt:
                ctxt.reraise = False
                if self.sync_on_missing:
                    lsn_id = super(PersistentLsnManager, self).lsn_get(
                        context, network_id, raise_on_err=raise_on_err)
                    self.lsn_save(context, network_id, lsn_id)
                    return lsn_id
                if raise_on_err:
                    ctxt.reraise = True

    def lsn_save(self, context, network_id, lsn_id):
        """Save LSN-Network mapping to the DB."""
        try:
            lsn_db.lsn_add(context, network_id, lsn_id)
        except db_exc.DBError:
            err_msg = _('Unable to save LSN for network %s') % network_id
            LOG.exception(err_msg)
            raise p_exc.NsxPluginException(err_msg=err_msg)

    def lsn_create(self, context, network_id):
        """Create the LSN on NSX, then persist the mapping; the NSX
        object is deleted again if persisting fails."""
        lsn_id = super(PersistentLsnManager,
                       self).lsn_create(context, network_id)
        try:
            self.lsn_save(context, network_id, lsn_id)
        except p_exc.NsxPluginException:
            with excutils.save_and_reraise_exception():
                super(PersistentLsnManager, self).lsn_delete(context, lsn_id)
        return lsn_id

    def lsn_delete(self, context, lsn_id):
        """Remove the DB mapping, then delete the LSN on NSX."""
        lsn_db.lsn_remove(context, lsn_id)
        super(PersistentLsnManager, self).lsn_delete(context, lsn_id)

    def lsn_port_get(self, context, network_id, subnet_id, raise_on_err=True):
        """Return (lsn_id, lsn_port_id) for the subnet from the DB,
        resyncing from NSX on a miss when sync_on_missing is enabled."""
        try:
            obj = lsn_db.lsn_port_get_for_subnet(
                context, subnet_id, raise_on_err=raise_on_err)
            return (obj.lsn_id, obj.lsn_port_id) if obj else (None, None)
        except p_exc.LsnPortNotFound:
            with excutils.save_and_reraise_exception() as ctxt:
                ctxt.reraise = False
                if self.sync_on_missing:
                    lsn_id, lsn_port_id = (
                        super(PersistentLsnManager, self).lsn_port_get(
                            context, network_id, subnet_id,
                            raise_on_err=raise_on_err))
                    # Fetch the mac from NSX so the DB row is complete.
                    mac_addr = lsn_api.lsn_port_info_get(
                        self.cluster, lsn_id, lsn_port_id)['mac_address']
                    self.lsn_port_save(
                        context, lsn_port_id, subnet_id, mac_addr, lsn_id)
                    return (lsn_id, lsn_port_id)
                if raise_on_err:
                    ctxt.reraise = True

    def lsn_port_get_by_mac(self, context, network_id, mac, raise_on_err=True):
        """Return (lsn_id, lsn_port_id) for the mac from the DB,
        resyncing from NSX on a miss when sync_on_missing is enabled."""
        try:
            obj = lsn_db.lsn_port_get_for_mac(
                context, mac, raise_on_err=raise_on_err)
            return (obj.lsn_id, obj.lsn_port_id) if obj else (None, None)
        except p_exc.LsnPortNotFound:
            with excutils.save_and_reraise_exception() as ctxt:
                ctxt.reraise = False
                if self.sync_on_missing:
                    lsn_id, lsn_port_id = (
                        super(PersistentLsnManager, self).lsn_port_get_by_mac(
                            context, network_id, mac,
                            raise_on_err=raise_on_err))
                    # Fetch the subnet from NSX so the DB row is complete.
                    subnet_id = lsn_api.lsn_port_info_get(
                        self.cluster, lsn_id, lsn_port_id).get('subnet_id')
                    self.lsn_port_save(
                        context, lsn_port_id, subnet_id, mac, lsn_id)
                    return (lsn_id, lsn_port_id)
                if raise_on_err:
                    ctxt.reraise = True

    def lsn_port_save(self, context, lsn_port_id, subnet_id, mac_addr, lsn_id):
        """Save LSN Port information to the DB."""
        try:
            lsn_db.lsn_port_add_for_lsn(
                context, lsn_port_id, subnet_id, mac_addr, lsn_id)
        except db_exc.DBError:
            err_msg = _('Unable to save LSN port for subnet %s') % subnet_id
            LOG.exception(err_msg)
            raise p_exc.NsxPluginException(err_msg=err_msg)

    def lsn_port_create(self, context, lsn_id, subnet_info):
        """Create the LSN port on NSX, then persist it; the NSX port
        is deleted again if persisting fails."""
        lsn_port_id = super(PersistentLsnManager,
                            self).lsn_port_create(context, lsn_id, subnet_info)
        try:
            self.lsn_port_save(context, lsn_port_id, subnet_info['subnet_id'],
                               subnet_info['mac_address'], lsn_id)
        except p_exc.NsxPluginException:
            with excutils.save_and_reraise_exception():
                super(PersistentLsnManager, self).lsn_port_delete(
                    context, lsn_id, lsn_port_id)
        return lsn_port_id

    def lsn_port_delete(self, context, lsn_id, lsn_port_id):
        """Remove the DB row, then delete the LSN port on NSX."""
        lsn_db.lsn_port_remove(context, lsn_port_id)
        super(PersistentLsnManager, self).lsn_port_delete(
            context, lsn_id, lsn_port_id)
|
@ -1,181 +0,0 @@
|
||||
# Copyright 2014 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from neutron.common import constants as const
|
||||
from neutron.common import exceptions as n_exc
|
||||
from neutron.extensions import external_net
|
||||
from neutron.i18n import _LE
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.common import exceptions as p_exc
|
||||
from neutron.plugins.vmware.dhcp_meta import nsx
|
||||
from neutron.plugins.vmware.dhcp_meta import rpc
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DhcpMetadataBuilder(object):
    """Helpers to move DHCP/metadata resources between the agent-based
    model and the LSN (agentless) model for a network."""

    def __init__(self, plugin, agent_notifier):
        self.plugin = plugin
        self.notifier = agent_notifier

    def dhcp_agent_get_all(self, context, network_id):
        """Return the agents managing the network."""
        return self.plugin.list_dhcp_agents_hosting_network(
            context, network_id)['agents']

    def dhcp_port_get_all(self, context, network_id):
        """Return the dhcp ports allocated for the network."""
        filters = {
            'network_id': [network_id],
            'device_owner': [const.DEVICE_OWNER_DHCP]
        }
        return self.plugin.get_ports(context, filters=filters)

    def router_id_get(self, context, subnet=None):
        """Return the router and interface used for the subnet.

        Returns None when no subnet is given or no router interface
        port on the subnet's network matches the subnet.
        """
        if not subnet:
            return
        network_id = subnet['network_id']
        filters = {
            'network_id': [network_id],
            'device_owner': [const.DEVICE_OWNER_ROUTER_INTF]
        }
        ports = self.plugin.get_ports(context, filters=filters)
        for port in ports:
            # Match the interface port whose first fixed IP sits on
            # the subnet of interest.
            if port['fixed_ips'][0]['subnet_id'] == subnet['id']:
                return port['device_id']

    def metadata_deallocate(self, context, router_id, subnet_id):
        """Deallocate metadata services for the subnet."""
        interface = {'subnet_id': subnet_id}
        self.plugin.remove_router_interface(context, router_id, interface)

    def metadata_allocate(self, context, router_id, subnet_id):
        """Allocate metadata resources for the subnet via the router."""
        interface = {'subnet_id': subnet_id}
        self.plugin.add_router_interface(context, router_id, interface)

    def dhcp_deallocate(self, context, network_id, agents, ports):
        """Deallocate dhcp resources for the network."""
        for agent in agents:
            self.plugin.remove_network_from_dhcp_agent(
                context, agent['id'], network_id)
        for port in ports:
            try:
                self.plugin.delete_port(context, port['id'])
            except n_exc.PortNotFound:
                # Port may have been removed concurrently; not fatal.
                LOG.error(_LE('Port %s is already gone'), port['id'])

    def dhcp_allocate(self, context, network_id, subnet):
        """Allocate dhcp resources for the subnet."""
        # Create LSN resources
        network_data = {'id': network_id}
        nsx.handle_network_dhcp_access(self.plugin, context,
                                       network_data, 'create_network')
        if subnet:
            subnet_data = {'subnet': subnet}
            self.notifier.notify(context, subnet_data, 'subnet.create.end')
            # Get DHCP host and metadata entries created for the LSN
            port = {
                'network_id': network_id,
                'fixed_ips': [{'subnet_id': subnet['id']}]
            }
            self.notifier.notify(context, {'port': port}, 'port.update.end')
|
||||
|
||||
|
||||
class MigrationManager(object):
    """Migrates a network's DHCP/metadata services onto an LSN.

    Coordinates moving agent-based DHCP and metadata resources over to
    NSX Logical Service Node (LSN) backed services, and reports which
    flavor of services a network is currently using.
    """

    def __init__(self, plugin, lsn_manager, agent_notifier):
        # Core plugin used for network/subnet lookups.
        self.plugin = plugin
        # LSN manager used to query LSN state.
        self.manager = lsn_manager
        # Builder encapsulating the (de)allocation steps of a migration.
        self.builder = DhcpMetadataBuilder(plugin, agent_notifier)

    def validate(self, context, network_id):
        """Validate and return subnet's dhcp info for migration.

        Returns the network's single subnet (or None when it has no
        subnets); raises LsnMigrationConflict or BadRequest when the
        network cannot be migrated.
        """
        network = self.plugin.get_network(context, network_id)

        # Already migrated: there is an LSN for this network.
        if self.manager.lsn_exists(context, network_id):
            reason = _("LSN already exist")
            raise p_exc.LsnMigrationConflict(net_id=network_id, reason=reason)

        if network[external_net.EXTERNAL]:
            reason = _("Cannot migrate an external network")
            raise n_exc.BadRequest(resource='network', msg=reason)

        filters = {'network_id': [network_id]}
        subnets = self.plugin.get_subnets(context, filters=filters)
        count = len(subnets)
        if count == 0:
            return None
        elif count == 1 and subnets[0]['cidr'] == rpc.METADATA_SUBNET_CIDR:
            # The internal metadata-access network must not be migrated.
            reason = _("Cannot migrate a 'metadata' network")
            raise n_exc.BadRequest(resource='network', msg=reason)
        elif count > 1:
            reason = _("Unable to support multiple subnets per network")
            raise p_exc.LsnMigrationConflict(net_id=network_id, reason=reason)
        else:
            return subnets[0]

    def migrate(self, context, network_id, subnet=None):
        """Migrate subnet resources to LSN."""
        router_id = self.builder.router_id_get(context, subnet)
        if router_id and subnet:
            # Deallocate resources taken for the router, if any
            self.builder.metadata_deallocate(context, router_id, subnet['id'])
        if subnet:
            # Deallocate resources taken for the agent, if any
            agents = self.builder.dhcp_agent_get_all(context, network_id)
            ports = self.builder.dhcp_port_get_all(context, network_id)
            self.builder.dhcp_deallocate(context, network_id, agents, ports)
        # (re)create the configuration for LSN
        self.builder.dhcp_allocate(context, network_id, subnet)
        if router_id and subnet:
            # Allocate resources taken for the router, if any
            self.builder.metadata_allocate(context, router_id, subnet['id'])

    def report(self, context, network_id, subnet_id=None):
        """Return a report of the dhcp and metadata resources in use.

        The report is a dict with 'type' ('lsn' or 'agent'), the list of
        'services' and the list of 'ports' providing DHCP/metadata.
        """
        if subnet_id:
            lsn_id, lsn_port_id = self.manager.lsn_port_get(
                context, network_id, subnet_id, raise_on_err=False)
        else:
            # No subnet supplied: infer one from the network, or fall
            # back to a network-level LSN lookup.
            filters = {'network_id': [network_id]}
            subnets = self.plugin.get_subnets(context, filters=filters)
            if subnets:
                lsn_id, lsn_port_id = self.manager.lsn_port_get(
                    context, network_id, subnets[0]['id'], raise_on_err=False)
            else:
                lsn_id = self.manager.lsn_get(context, network_id,
                                              raise_on_err=False)
                lsn_port_id = None
        if lsn_id:
            ports = [lsn_port_id] if lsn_port_id else []
            report = {
                'type': 'lsn',
                'services': [lsn_id],
                'ports': ports
            }
        else:
            # Not on an LSN: report the agent-based resources instead.
            agents = self.builder.dhcp_agent_get_all(context, network_id)
            ports = self.builder.dhcp_port_get_all(context, network_id)
            report = {
                'type': 'agent',
                'services': [a['id'] for a in agents],
                'ports': [p['id'] for p in ports]
            }
        return report
|
@ -1,323 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc.
|
||||
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import excutils
|
||||
|
||||
from neutron.api.v2 import attributes as attr
|
||||
from neutron.common import constants as const
|
||||
from neutron.common import exceptions as n_exc
|
||||
from neutron.db import db_base_plugin_v2
|
||||
from neutron.db import l3_db
|
||||
from neutron.extensions import external_net
|
||||
from neutron.i18n import _LE, _LI
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.common import exceptions as p_exc
|
||||
from neutron.plugins.vmware.dhcp_meta import constants as d_const
|
||||
from neutron.plugins.vmware.nsxlib import lsn as lsn_api
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
dhcp_opts = [
|
||||
cfg.ListOpt('extra_domain_name_servers',
|
||||
deprecated_group='NVP_DHCP',
|
||||
default=[],
|
||||
help=_('Comma separated list of additional '
|
||||
'domain name servers')),
|
||||
cfg.StrOpt('domain_name',
|
||||
deprecated_group='NVP_DHCP',
|
||||
default='openstacklocal',
|
||||
help=_('Domain to use for building the hostnames')),
|
||||
cfg.IntOpt('default_lease_time', default=43200,
|
||||
deprecated_group='NVP_DHCP',
|
||||
help=_("Default DHCP lease time")),
|
||||
]
|
||||
|
||||
|
||||
metadata_opts = [
|
||||
cfg.StrOpt('metadata_server_address',
|
||||
deprecated_group='NVP_METADATA',
|
||||
default='127.0.0.1',
|
||||
help=_("IP address used by Metadata server.")),
|
||||
cfg.IntOpt('metadata_server_port',
|
||||
deprecated_group='NVP_METADATA',
|
||||
default=8775,
|
||||
help=_("TCP Port used by Metadata server.")),
|
||||
cfg.StrOpt('metadata_shared_secret',
|
||||
deprecated_group='NVP_METADATA',
|
||||
default='',
|
||||
help=_('Shared secret to sign instance-id request'),
|
||||
secret=True)
|
||||
]
|
||||
|
||||
|
||||
def register_dhcp_opts(config):
    # Register the NSX DHCP options under their own configuration group.
    config.CONF.register_opts(dhcp_opts, group="NSX_DHCP")
|
||||
|
||||
|
||||
def register_metadata_opts(config):
    # Register the NSX metadata options under their own configuration group.
    config.CONF.register_opts(metadata_opts, group="NSX_METADATA")
|
||||
|
||||
|
||||
class DhcpAgentNotifyAPI(object):
    """Notification handler that maps DHCP agent events onto LSN calls.

    Stands in for the regular DHCP agent notifier when DHCP/metadata
    services are provided by NSX Logical Service Nodes: subnet and port
    events are translated into LSN configuration operations instead of
    being fanned out to agents.
    """

    def __init__(self, plugin, lsn_manager):
        self.plugin = plugin
        self.lsn_manager = lsn_manager
        # Dispatch table: subnet event action -> handler method.
        self._handle_subnet_dhcp_access = {'create': self._subnet_create,
                                           'update': self._subnet_update,
                                           'delete': self._subnet_delete}

    def notify(self, context, data, methodname):
        """Dispatch a '<resource>.<action>.<suffix>' event to its handler."""
        [resource, action, _e] = methodname.split('.')
        if resource == 'subnet':
            self._handle_subnet_dhcp_access[action](context, data['subnet'])
        elif resource == 'port' and action == 'update':
            self._port_update(context, data['port'])

    def _port_update(self, context, port):
        # Rebuild the LSN port's DHCP and metadata host tables after a
        # port update on the port's network.
        # With no fixed IP's there's nothing that can be updated
        if not port["fixed_ips"]:
            return
        network_id = port['network_id']
        subnet_id = port["fixed_ips"][0]['subnet_id']
        filters = {'network_id': [network_id]}
        # Because NSX does not support updating a single host entry we
        # got to build the whole list from scratch and update in bulk
        ports = self.plugin.get_ports(context, filters)
        if not ports:
            return
        # DHCP entries: one per user port with a fixed IP.
        dhcp_conf = [
            {'mac_address': p['mac_address'],
             'ip_address': p["fixed_ips"][0]['ip_address']}
            for p in ports if is_user_port(p)
        ]
        # Metadata entries additionally require a device_id.
        meta_conf = [
            {'instance_id': p['device_id'],
             'ip_address': p["fixed_ips"][0]['ip_address']}
            for p in ports if is_user_port(p, check_dev_id=True)
        ]
        self.lsn_manager.lsn_port_update(
            context, network_id, subnet_id, dhcp=dhcp_conf, meta=meta_conf)

    def _subnet_create(self, context, subnet, clean_on_err=True):
        # Provision DHCP for a new subnet by creating its DHCP port; the
        # plugin's port-create hooks then configure the LSN.
        if subnet['enable_dhcp']:
            network_id = subnet['network_id']
            # Create port for DHCP service
            dhcp_port = {
                "name": "",
                "admin_state_up": True,
                "device_id": "",
                "device_owner": const.DEVICE_OWNER_DHCP,
                "network_id": network_id,
                "tenant_id": subnet["tenant_id"],
                "mac_address": attr.ATTR_NOT_SPECIFIED,
                "fixed_ips": [{"subnet_id": subnet['id']}]
            }
            try:
                # This will end up calling handle_port_dhcp_access
                # down below as well as handle_port_metadata_access
                self.plugin.create_port(context, {'port': dhcp_port})
            except p_exc.PortConfigurationError as e:
                LOG.error(_LE("Error while creating subnet %(cidr)s for "
                              "network %(network)s. Please, contact "
                              "administrator"),
                          {"cidr": subnet["cidr"],
                           "network": network_id})
                # Roll back the half-created port (bypassing the plugin's
                # own delete hooks) and, unless told otherwise, the subnet.
                db_base_plugin_v2.NeutronDbPluginV2.delete_port(
                    self.plugin, context, e.port_id)
                if clean_on_err:
                    self.plugin.delete_subnet(context, subnet['id'])
                raise n_exc.Conflict()

    def _subnet_update(self, context, subnet):
        # Reconfigure DHCP on the subnet's LSN port; fall back to full
        # provisioning when the LSN port does not exist yet.
        network_id = subnet['network_id']
        try:
            lsn_id, lsn_port_id = self.lsn_manager.lsn_port_get(
                context, network_id, subnet['id'])
            self.lsn_manager.lsn_port_dhcp_configure(
                context, lsn_id, lsn_port_id, subnet)
        except p_exc.LsnPortNotFound:
            # It's possible that the subnet was created with dhcp off;
            # check if the subnet was uplinked onto a router, and if so
            # remove the patch attachment between the metadata port and
            # the lsn port, in favor on the one we'll be creating during
            # _subnet_create
            self.lsn_manager.lsn_port_dispose(
                context, network_id, d_const.METADATA_MAC)
            # also, check that a dhcp port exists first and provision it
            # accordingly
            filters = dict(network_id=[network_id],
                           device_owner=[const.DEVICE_OWNER_DHCP])
            ports = self.plugin.get_ports(context, filters=filters)
            if ports:
                handle_port_dhcp_access(
                    self.plugin, context, ports[0], 'create_port')
            else:
                self._subnet_create(context, subnet, clean_on_err=False)

    def _subnet_delete(self, context, subnet):
        # Tear down DHCP for a deleted subnet by removing its DHCP port;
        # the plugin's port-delete hooks clean up the LSN side.
        # FIXME(armando-migliaccio): it looks like that a subnet filter
        # is ineffective; so filter by network for now.
        network_id = subnet['network_id']
        filters = dict(network_id=[network_id],
                       device_owner=[const.DEVICE_OWNER_DHCP])
        # FIXME(armando-migliaccio): this may be race-y
        ports = self.plugin.get_ports(context, filters=filters)
        if ports:
            # This will end up calling handle_port_dhcp_access
            # down below as well as handle_port_metadata_access
            self.plugin.delete_port(context, ports[0]['id'])
|
||||
|
||||
|
||||
def is_user_port(p, check_dev_id=False):
    """Return whether the port is a user (non-service) port.

    A port qualifies when it has fixed IPs and its device_owner is not
    one of the special service owners; with check_dev_id=True a truthy
    device_id is additionally required.
    """
    usable = p['fixed_ips'] and p['device_owner'] not in d_const.SPECIAL_OWNERS
    if check_dev_id:
        return usable and p['device_id']
    return usable
|
||||
|
||||
|
||||
def check_services_requirements(cluster):
    """Ensure the backend can provide agentless DHCP/metadata services.

    Raises InvalidVersion when the backend is older than NSX 4.1 (the
    first release with the experimental DHCP feature), and
    ServiceClusterUnavailable when the configured service cluster does
    not exist on the backend.
    """
    ver = cluster.api_client.get_version()
    # It sounds like 4.1 is the first one where DHCP in NSX
    # will have the experimental feature.
    # NOTE: compare (major, minor) lexicographically; the previous
    # 'ver.major >= 4 and ver.minor >= 1' check wrongly rejected
    # releases such as 5.0 whose minor component is below 1.
    if (ver.major, ver.minor) >= (4, 1):
        cluster_id = cfg.CONF.default_service_cluster_uuid
        if not lsn_api.service_cluster_exists(cluster, cluster_id):
            raise p_exc.ServiceClusterUnavailable(cluster_id=cluster_id)
    else:
        raise p_exc.InvalidVersion(version=ver)
|
||||
|
||||
|
||||
def handle_network_dhcp_access(plugin, context, network, action):
    """Create or delete the LSN backing a network's DHCP services."""
    LOG.info(_LI("Performing DHCP %(action)s for resource: %(resource)s"),
             {"action": action, "resource": network})
    if action == 'create_network':
        network_id = network['id']
        # External networks do not get an LSN.
        if network.get(external_net.EXTERNAL):
            LOG.info(_LI("Network %s is external: no LSN to create"),
                     network_id)
            return
        plugin.lsn_manager.lsn_create(context, network_id)
    elif action == 'delete_network':
        # NOTE(armando-migliaccio): on delete_network, network
        # is just the network id
        network_id = network
        plugin.lsn_manager.lsn_delete_by_network(context, network_id)
    # NOTE(review): an action other than create/delete would leave
    # network_id unbound here and raise NameError -- confirm callers only
    # ever pass these two actions.
    LOG.info(_LI("Logical Services Node for network "
                 "%s configured successfully"), network_id)
|
||||
|
||||
|
||||
def handle_port_dhcp_access(plugin, context, port, action):
    """Propagate a port create/delete to the network's LSN DHCP config.

    The DHCP port itself is bound to (or disposed from) the LSN; any
    other port gets a DHCP host entry added or removed on the LSN.
    """
    LOG.info(_LI("Performing DHCP %(action)s for resource: %(resource)s"),
             {"action": action, "resource": port})
    if port["device_owner"] == const.DEVICE_OWNER_DHCP:
        network_id = port["network_id"]
        if action == "create_port":
            # at this point the port must have a subnet and a fixed ip
            subnet_id = port["fixed_ips"][0]['subnet_id']
            subnet = plugin.get_subnet(context, subnet_id)
            subnet_data = {
                "mac_address": port["mac_address"],
                "ip_address": subnet['cidr'],
                "subnet_id": subnet['id']
            }
            try:
                plugin.lsn_manager.lsn_port_dhcp_setup(
                    context, network_id, port['id'], subnet_data, subnet)
            except p_exc.PortConfigurationError:
                LOG.error(_LE("Error while configuring DHCP for "
                              "port %s"), port['id'])
                raise n_exc.NeutronException()
        elif action == "delete_port":
            plugin.lsn_manager.lsn_port_dispose(context, network_id,
                                                port['mac_address'])
    # NOTE(review): this elif condition is always true when reached (the
    # branch above already handled the DHCP-owner case).
    elif port["device_owner"] != const.DEVICE_OWNER_DHCP:
        if port.get("fixed_ips"):
            # do something only if there are IP's and dhcp is enabled
            subnet_id = port["fixed_ips"][0]['subnet_id']
            if not plugin.get_subnet(context, subnet_id)['enable_dhcp']:
                LOG.info(_LI("DHCP is disabled for subnet %s: nothing "
                             "to do"), subnet_id)
                return
            host_data = {
                "mac_address": port["mac_address"],
                "ip_address": port["fixed_ips"][0]['ip_address']
            }
            network_id = port["network_id"]
            if action == "create_port":
                handler = plugin.lsn_manager.lsn_port_dhcp_host_add
            elif action == "delete_port":
                handler = plugin.lsn_manager.lsn_port_dhcp_host_remove
            try:
                handler(context, network_id, subnet_id, host_data)
            except p_exc.PortConfigurationError:
                with excutils.save_and_reraise_exception():
                    if action == 'create_port':
                        # Undo the port creation without re-triggering the
                        # plugin's own DHCP hooks.
                        db_base_plugin_v2.NeutronDbPluginV2.delete_port(
                            plugin, context, port['id'])
    LOG.info(_LI("DHCP for port %s configured successfully"), port['id'])
|
||||
|
||||
|
||||
def handle_port_metadata_access(plugin, context, port, is_delete=False):
    """Add or remove the LSN metadata host entry for a user port."""
    if is_user_port(port, check_dev_id=True):
        network_id = port["network_id"]
        network = plugin.get_network(context, network_id)
        # External networks carry no metadata configuration.
        if network[external_net.EXTERNAL]:
            LOG.info(_LI("Network %s is external: nothing to do"),
                     network_id)
            return
        subnet_id = port["fixed_ips"][0]['subnet_id']
        host_data = {
            "instance_id": port["device_id"],
            "tenant_id": port["tenant_id"],
            "ip_address": port["fixed_ips"][0]['ip_address']
        }
        LOG.info(_LI("Configuring metadata entry for port %s"), port)
        if not is_delete:
            handler = plugin.lsn_manager.lsn_port_meta_host_add
        else:
            handler = plugin.lsn_manager.lsn_port_meta_host_remove
        try:
            handler(context, network_id, subnet_id, host_data)
        except p_exc.PortConfigurationError:
            with excutils.save_and_reraise_exception():
                if not is_delete:
                    # Undo the port creation without re-triggering the
                    # plugin's own delete hooks.
                    db_base_plugin_v2.NeutronDbPluginV2.delete_port(
                        plugin, context, port['id'])
        LOG.info(_LI("Metadata for port %s configured successfully"),
                 port['id'])
|
||||
|
||||
|
||||
def handle_router_metadata_access(plugin, context, router_id, interface=None):
    """Reconfigure LSN metadata services when a router interface changes.

    Metadata is enabled for the interface's subnet when its port still
    exists, disabled otherwise; on failure a just-added interface is
    rolled back before re-raising.
    """
    LOG.info(_LI("Handle metadata access via router: %(r)s and "
                 "interface %(i)s"), {'r': router_id, 'i': interface})
    if interface:
        try:
            plugin.get_port(context, interface['port_id'])
            is_enabled = True
        except n_exc.NotFound:
            is_enabled = False
        subnet_id = interface['subnet_id']
        try:
            plugin.lsn_manager.lsn_metadata_configure(
                context, subnet_id, is_enabled)
        except p_exc.NsxPluginException:
            with excutils.save_and_reraise_exception():
                if is_enabled:
                    # Roll back the interface addition (bypassing the
                    # plugin's own hooks) before re-raising.
                    l3_db.L3_NAT_db_mixin.remove_router_interface(
                        plugin, context, router_id, interface)
    LOG.info(_LI("Metadata for router %s handled successfully"), router_id)
|
@ -1,214 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from eventlet import greenthread
|
||||
import netaddr
|
||||
from oslo_config import cfg
|
||||
|
||||
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
|
||||
from neutron.api.v2 import attributes
|
||||
from neutron.common import constants as const
|
||||
from neutron.common import exceptions as ntn_exc
|
||||
from neutron.db import db_base_plugin_v2
|
||||
from neutron.db import l3_db
|
||||
from neutron.db import models_v2
|
||||
from neutron.i18n import _LE, _LI, _LW
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.api_client import exception as api_exc
|
||||
from neutron.plugins.vmware.common import config
|
||||
from neutron.plugins.vmware.common import exceptions as nsx_exc
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# Prefix length of the dedicated metadata access subnet (a /30).
METADATA_DEFAULT_PREFIX = 30
# Link-local CIDR of the metadata access network attached to routers.
METADATA_SUBNET_CIDR = '169.254.169.252/%d' % METADATA_DEFAULT_PREFIX
# Gateway address assigned on the metadata access subnet.
METADATA_GATEWAY_IP = '169.254.169.253'
# Host route injected on a subnet to reach the well-known metadata address.
METADATA_DHCP_ROUTE = '169.254.169.254/32'
|
||||
|
||||
|
||||
def handle_network_dhcp_access(plugin, context, network, action):
    # No-op in this module: network-level DHCP is handled by the DHCP
    # agent rather than by the plugin.
    pass
|
||||
|
||||
|
||||
def handle_port_dhcp_access(plugin, context, port_data, action):
    """Nudge the DHCP agent when a DHCP port changes in 'indirect' mode."""
    active_port = (cfg.CONF.NSX.metadata_mode == config.MetadataModes.INDIRECT
                   and port_data.get('device_owner') == const.DEVICE_OWNER_DHCP
                   and port_data.get('fixed_ips', []))
    if active_port:
        subnet_id = port_data['fixed_ips'][0]['subnet_id']
        subnet = plugin.get_subnet(context, subnet_id)
        # Re-send the subnet so the agent picks up the metadata route.
        _notify_rpc_agent(context, {'subnet': subnet}, 'subnet.update.end')
|
||||
|
||||
|
||||
def handle_port_metadata_access(plugin, context, port, is_delete=False):
    """Maintain the metadata host route on a DHCP port's subnet.

    In 'indirect' metadata mode a host route to the metadata address via
    the DHCP port's IP is added to, updated on, or deleted from the
    port's subnet.
    """
    if (cfg.CONF.NSX.metadata_mode == config.MetadataModes.INDIRECT and
        port.get('device_owner') == const.DEVICE_OWNER_DHCP):
        if port.get('fixed_ips', []) or is_delete:
            # NOTE(review): when is_delete is True and the port has no
            # fixed_ips, the subscript below raises IndexError -- confirm
            # callers always pass at least one fixed IP on delete.
            fixed_ip = port['fixed_ips'][0]
            query = context.session.query(models_v2.Subnet)
            subnet = query.filter(
                models_v2.Subnet.id == fixed_ip['subnet_id']).one()
            # If subnet does not have a gateway do not create metadata
            # route. This is done via the enable_isolated_metadata
            # option if desired.
            if not subnet.get('gateway_ip'):
                LOG.info(_LI('Subnet %s does not have a gateway, the '
                             'metadata route will not be created'),
                         subnet['id'])
                return
            metadata_routes = [r for r in subnet.routes
                               if r['destination'] == METADATA_DHCP_ROUTE]
            if metadata_routes:
                # We should have only a single metadata route at any time
                # because the route logic forbids two routes with the same
                # destination. Update next hop with the provided IP address
                if not is_delete:
                    metadata_routes[0].nexthop = fixed_ip['ip_address']
                else:
                    context.session.delete(metadata_routes[0])
            else:
                # add the metadata route
                route = models_v2.SubnetRoute(
                    subnet_id=subnet.id,
                    destination=METADATA_DHCP_ROUTE,
                    nexthop=fixed_ip['ip_address'])
                context.session.add(route)
|
||||
|
||||
|
||||
def handle_router_metadata_access(plugin, context, router_id, interface=None):
    """Create/destroy the router's metadata access network (direct mode).

    When an interface is added and no metadata network exists yet, one
    is created and attached; when the router is down to its last port
    (possibly the metadata port itself), the network is torn down.
    """
    if cfg.CONF.NSX.metadata_mode != config.MetadataModes.DIRECT:
        LOG.debug("Metadata access network is disabled")
        return
    # The 169.254.169.252/30 metadata subnet may clash with tenant
    # ranges, so overlapping IPs are a hard requirement.
    if not cfg.CONF.allow_overlapping_ips:
        LOG.warn(_LW("Overlapping IPs must be enabled in order to setup "
                     "the metadata access network"))
        return
    ctx_elevated = context.elevated()
    device_filter = {'device_id': [router_id],
                     'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]}
    # Retrieve ports calling database plugin
    ports = db_base_plugin_v2.NeutronDbPluginV2.get_ports(
        plugin, ctx_elevated, filters=device_filter)
    try:
        if ports:
            if (interface and
                not _find_metadata_port(plugin, ctx_elevated, ports)):
                _create_metadata_access_network(
                    plugin, ctx_elevated, router_id)
            elif len(ports) == 1:
                # The only port left might be the metadata port
                _destroy_metadata_access_network(
                    plugin, ctx_elevated, router_id, ports)
        else:
            LOG.debug("No router interface found for router '%s'. "
                      "No metadata access network should be "
                      "created or destroyed", router_id)
    # TODO(salvatore-orlando): A better exception handling in the
    # NSX plugin would allow us to improve error handling here
    except (ntn_exc.NeutronException, nsx_exc.NsxPluginException,
            api_exc.NsxApiException):
        # Any exception here should be regarded as non-fatal
        LOG.exception(_LE("An error occurred while operating on the "
                          "metadata access network for router:'%s'"),
                      router_id)
|
||||
|
||||
|
||||
def _find_metadata_port(plugin, context, ports):
|
||||
for port in ports:
|
||||
for fixed_ip in port['fixed_ips']:
|
||||
cidr = netaddr.IPNetwork(
|
||||
plugin.get_subnet(context, fixed_ip['subnet_id'])['cidr'])
|
||||
if cidr in netaddr.IPNetwork(METADATA_SUBNET_CIDR):
|
||||
return port
|
||||
|
||||
|
||||
def _create_metadata_access_network(plugin, context, router_id):
    """Build the metadata access network and attach it to the router.

    Creates a link-local network/subnet (METADATA_SUBNET_CIDR) and plugs
    it into the router; on failure the network (and with it the subnet)
    is deleted.
    """
    # Add network
    # Network name is likely to be truncated on NSX
    net_data = {'name': 'meta-%s' % router_id,
                'tenant_id': '',  # intentionally not set
                'admin_state_up': True,
                'port_security_enabled': False,
                'shared': False,
                'status': const.NET_STATUS_ACTIVE}
    meta_net = plugin.create_network(context,
                                     {'network': net_data})
    greenthread.sleep(0)  # yield
    plugin.schedule_network(context, meta_net)
    greenthread.sleep(0)  # yield
    # From this point on there will be resources to garbage-collect
    # in case of failures
    meta_sub = None
    try:
        # Add subnet
        subnet_data = {'network_id': meta_net['id'],
                       'tenant_id': '',  # intentionally not set
                       'name': 'meta-%s' % router_id,
                       'ip_version': 4,
                       'shared': False,
                       'cidr': METADATA_SUBNET_CIDR,
                       'enable_dhcp': True,
                       # Ensure default allocation pool is generated
                       'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
                       'gateway_ip': METADATA_GATEWAY_IP,
                       'dns_nameservers': [],
                       'host_routes': []}
        meta_sub = plugin.create_subnet(context,
                                        {'subnet': subnet_data})
        greenthread.sleep(0)  # yield
        plugin.add_router_interface(context, router_id,
                                    {'subnet_id': meta_sub['id']})
        greenthread.sleep(0)  # yield
        # Tell to start the metadata agent proxy, only if we had success
        _notify_rpc_agent(context, {'subnet': meta_sub}, 'subnet.create.end')
    except (ntn_exc.NeutronException,
            nsx_exc.NsxPluginException,
            api_exc.NsxApiException):
        # It is not necessary to explicitly delete the subnet
        # as it will be removed with the network
        plugin.delete_network(context, meta_net['id'])
|
||||
|
||||
|
||||
def _destroy_metadata_access_network(plugin, context, router_id, ports):
    """Detach the metadata network from the router and delete it.

    No-op when the given ports do not include a metadata port; on delete
    failure the router interface is re-added.
    """
    if not ports:
        return
    meta_port = _find_metadata_port(plugin, context, ports)
    if not meta_port:
        return
    meta_net_id = meta_port['network_id']
    meta_sub_id = meta_port['fixed_ips'][0]['subnet_id']
    plugin.remove_router_interface(
        context, router_id, {'port_id': meta_port['id']})
    greenthread.sleep(0)  # yield
    context.session.expunge_all()
    try:
        # Remove network (this will remove the subnet too)
        plugin.delete_network(context, meta_net_id)
        greenthread.sleep(0)  # yield
    except (ntn_exc.NeutronException, nsx_exc.NsxPluginException,
            api_exc.NsxApiException):
        # must re-add the router interface
        plugin.add_router_interface(context, router_id,
                                    {'subnet_id': meta_sub_id})
    # Tell to stop the metadata agent proxy
    _notify_rpc_agent(
        context, {'network': {'id': meta_net_id}}, 'network.delete.end')
|
||||
|
||||
|
||||
def _notify_rpc_agent(context, payload, event):
    # Fan the event out to DHCP agents, but only when agent notifications
    # are enabled in the configuration.
    if cfg.CONF.dhcp_agent_notification:
        dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        dhcp_notifier.notify(context, payload, event)
|
@ -1,167 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import importutils
|
||||
|
||||
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
|
||||
from neutron.api.rpc.handlers import dhcp_rpc
|
||||
from neutron.api.rpc.handlers import metadata_rpc
|
||||
from neutron.common import constants as const
|
||||
from neutron.common import rpc as n_rpc
|
||||
from neutron.common import topics
|
||||
from neutron.db import agents_db
|
||||
from neutron.i18n import _LW
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.common import config
|
||||
from neutron.plugins.vmware.common import exceptions as nsx_exc
|
||||
from neutron.plugins.vmware.dhcp_meta import combined
|
||||
from neutron.plugins.vmware.dhcp_meta import lsnmanager
|
||||
from neutron.plugins.vmware.dhcp_meta import migration
|
||||
from neutron.plugins.vmware.dhcp_meta import nsx as nsx_svc
|
||||
from neutron.plugins.vmware.dhcp_meta import rpc as nsx_rpc
|
||||
from neutron.plugins.vmware.extensions import lsn
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DhcpMetadataAccess(object):
    """Mixin wiring DHCP/metadata support into the NSX plugin.

    Based on the configured agent_mode (agent, agentless or combined)
    it selects the module whose handle_* functions implement DHCP and
    metadata access, and sets up the corresponding RPC and/or LSN
    machinery.
    """

    def setup_dhcpmeta_access(self):
        """Initialize support for DHCP and Metadata services."""
        self._init_extensions()
        if cfg.CONF.NSX.agent_mode == config.AgentModes.AGENT:
            self._setup_rpc_dhcp_metadata()
            mod = nsx_rpc
        elif cfg.CONF.NSX.agent_mode == config.AgentModes.AGENTLESS:
            self._setup_nsx_dhcp_metadata()
            mod = nsx_svc
        elif cfg.CONF.NSX.agent_mode == config.AgentModes.COMBINED:
            notifier = self._setup_nsx_dhcp_metadata()
            self._setup_rpc_dhcp_metadata(notifier=notifier)
            mod = combined
        else:
            error = _("Invalid agent_mode: %s") % cfg.CONF.NSX.agent_mode
            LOG.error(error)
            raise nsx_exc.NsxPluginException(err_msg=error)
        # Delegate the handle_* entry points to the selected module.
        self.handle_network_dhcp_access_delegate = (
            mod.handle_network_dhcp_access
        )
        self.handle_port_dhcp_access_delegate = (
            mod.handle_port_dhcp_access
        )
        self.handle_port_metadata_access_delegate = (
            mod.handle_port_metadata_access
        )
        self.handle_metadata_access_delegate = (
            mod.handle_router_metadata_access
        )

    def _setup_rpc_dhcp_metadata(self, notifier=None):
        # Start plugin-side RPC consumers for DHCP/agent/metadata events
        # and register the DHCP agent notifier and network scheduler.
        self.topic = topics.PLUGIN
        self.conn = n_rpc.create_connection(new=True)
        self.endpoints = [dhcp_rpc.DhcpRpcCallback(),
                          agents_db.AgentExtRpcCallback(),
                          metadata_rpc.MetadataRpcCallback()]
        self.conn.create_consumer(self.topic, self.endpoints, fanout=False)
        self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
            notifier or dhcp_rpc_agent_api.DhcpAgentNotifyAPI())
        self.conn.consume_in_threads()
        self.network_scheduler = importutils.import_object(
            cfg.CONF.network_scheduler_driver
        )
        self.supported_extension_aliases.extend(
            ['agent', 'dhcp_agent_scheduler'])

    def _setup_nsx_dhcp_metadata(self):
        # Configure LSN-backed DHCP/metadata (agentless or combined mode)
        # and return the notifier to use for DHCP events.
        self._check_services_requirements()
        nsx_svc.register_dhcp_opts(cfg)
        nsx_svc.register_metadata_opts(cfg)
        lsnmanager.register_lsn_opts(cfg)
        lsn_manager = lsnmanager.PersistentLsnManager(self.safe_reference)
        self.lsn_manager = lsn_manager
        if cfg.CONF.NSX.agent_mode == config.AgentModes.AGENTLESS:
            notifier = nsx_svc.DhcpAgentNotifyAPI(self.safe_reference,
                                                  lsn_manager)
            self.agent_notifiers[const.AGENT_TYPE_DHCP] = notifier
            # In agentless mode, ports whose owner is DHCP need to
            # be special cased; so add it to the list of special
            # owners list
            if const.DEVICE_OWNER_DHCP not in self.port_special_owners:
                self.port_special_owners.append(const.DEVICE_OWNER_DHCP)
        elif cfg.CONF.NSX.agent_mode == config.AgentModes.COMBINED:
            # This becomes ineffective, as all new networks creations
            # are handled by Logical Services Nodes in NSX
            cfg.CONF.set_override('network_auto_schedule', False)
            LOG.warn(_LW('network_auto_schedule has been disabled'))
            notifier = combined.DhcpAgentNotifyAPI(self.safe_reference,
                                                   lsn_manager)
            self.supported_extension_aliases.append(lsn.EXT_ALIAS)
        # Add the capability to migrate dhcp and metadata services over
        self.migration_manager = (
            migration.MigrationManager(
                self.safe_reference, lsn_manager, notifier))
        return notifier

    def _init_extensions(self):
        # Strip extensions that are re-added only when the selected agent
        # mode actually supports them.
        extensions = (lsn.EXT_ALIAS, 'agent', 'dhcp_agent_scheduler')
        for ext in extensions:
            if ext in self.supported_extension_aliases:
                self.supported_extension_aliases.remove(ext)

    def _check_services_requirements(self):
        # Translate backend capability errors into a plugin exception
        # carrying a user-actionable message.
        try:
            error = None
            nsx_svc.check_services_requirements(self.cluster)
        except nsx_exc.InvalidVersion:
            error = _("Unable to run Neutron with config option '%s', as NSX "
                      "does not support it") % cfg.CONF.NSX.agent_mode
        except nsx_exc.ServiceClusterUnavailable:
            error = _("Unmet dependency for config option "
                      "'%s'") % cfg.CONF.NSX.agent_mode
        if error:
            LOG.exception(error)
            raise nsx_exc.NsxPluginException(err_msg=error)

    def get_lsn(self, context, network_id, fields=None):
        """Return a usage report of the network's DHCP/metadata services."""
        report = self.migration_manager.report(context, network_id)
        return {'network': network_id, 'report': report}

    def create_lsn(self, context, lsn):
        """Migrate a network's DHCP/metadata services onto an LSN."""
        network_id = lsn['lsn']['network']
        subnet = self.migration_manager.validate(context, network_id)
        subnet_id = None if not subnet else subnet['id']
        self.migration_manager.migrate(context, network_id, subnet)
        r = self.migration_manager.report(context, network_id, subnet_id)
        return {'network': network_id, 'report': r}

    def handle_network_dhcp_access(self, context, network, action):
        # Forward to the module selected in setup_dhcpmeta_access.
        self.handle_network_dhcp_access_delegate(self.safe_reference, context,
                                                 network, action)

    def handle_port_dhcp_access(self, context, port_data, action):
        # Forward to the module selected in setup_dhcpmeta_access.
        self.handle_port_dhcp_access_delegate(self.safe_reference, context,
                                              port_data, action)

    def handle_port_metadata_access(self, context, port, is_delete=False):
        # Forward to the module selected in setup_dhcpmeta_access.
        self.handle_port_metadata_access_delegate(self.safe_reference, context,
                                                  port, is_delete)

    def handle_router_metadata_access(self, context,
                                      router_id, interface=None):
        # Forward to the module selected in setup_dhcpmeta_access.
        self.handle_metadata_access_delegate(self.safe_reference, context,
                                             router_id, interface)
|
@ -19,7 +19,6 @@ from oslo_config import cfg
|
||||
|
||||
from neutron.api.v2 import attributes
|
||||
from neutron.api.v2 import resource_helper
|
||||
from neutron.plugins.vmware.common import utils
|
||||
|
||||
GATEWAY_RESOURCE_NAME = "network_gateway"
|
||||
DEVICE_RESOURCE_NAME = "gateway_device"
|
||||
@ -30,6 +29,20 @@ GATEWAY_DEVICES = "%ss" % DEVICE_RESOURCE_NAME.replace('_', '-')
|
||||
DEVICE_ID_ATTR = 'id'
|
||||
IFACE_NAME_ATTR = 'interface_name'
|
||||
|
||||
|
||||
# TODO(salv-orlando): This type definition is duplicated into
|
||||
# stackforge/vmware-nsx. This temporary duplication should be removed once the
|
||||
# plugin decomposition is finished.
|
||||
# Allowed network types for the NSX Plugin
|
||||
class NetworkTypes(object):
|
||||
"""Allowed provider network types for the NSX Plugin."""
|
||||
L3_EXT = 'l3_ext'
|
||||
STT = 'stt'
|
||||
GRE = 'gre'
|
||||
FLAT = 'flat'
|
||||
VLAN = 'vlan'
|
||||
BRIDGE = 'bridge'
|
||||
|
||||
# Attribute Map for Network Gateway Resource
|
||||
# TODO(salvatore-orlando): add admin state as other neutron resources
|
||||
RESOURCE_ATTRIBUTE_MAP = {
|
||||
@ -111,11 +124,11 @@ def _validate_connector_type(data, valid_values=None):
|
||||
msg = _("A connector type is required to create a gateway device")
|
||||
return msg
|
||||
connector_types = (valid_values if valid_values else
|
||||
[utils.NetworkTypes.GRE,
|
||||
utils.NetworkTypes.STT,
|
||||
utils.NetworkTypes.BRIDGE,
|
||||
'ipsec%s' % utils.NetworkTypes.GRE,
|
||||
'ipsec%s' % utils.NetworkTypes.STT])
|
||||
[NetworkTypes.GRE,
|
||||
NetworkTypes.STT,
|
||||
NetworkTypes.BRIDGE,
|
||||
'ipsec%s' % NetworkTypes.GRE,
|
||||
'ipsec%s' % NetworkTypes.STT])
|
||||
if data not in connector_types:
|
||||
msg = _("Unknown connector type: %s") % data
|
||||
return msg
|
||||
|
@ -1,98 +0,0 @@
|
||||
# Copyright 2012 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
|
||||
from neutron.i18n import _LI
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.common import exceptions
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
DEFAULT_PORT = 443
|
||||
# Raise if one of those attributes is not specified
|
||||
REQUIRED_ATTRIBUTES = ['default_tz_uuid', 'nsx_user',
|
||||
'nsx_password', 'nsx_controllers']
|
||||
# Emit a INFO log if one of those attributes is not specified
|
||||
IMPORTANT_ATTRIBUTES = ['default_l3_gw_service_uuid']
|
||||
# Deprecated attributes
|
||||
DEPRECATED_ATTRIBUTES = ['metadata_dhcp_host_route',
|
||||
'nvp_user', 'nvp_password', 'nvp_controllers']
|
||||
|
||||
|
||||
class NSXCluster(object):
|
||||
"""NSX cluster class.
|
||||
|
||||
Encapsulates controller connections and the API client for a NSX cluster.
|
||||
|
||||
Controller-specific parameters, such as timeouts are stored in the
|
||||
elements of the controllers attribute, which are dicts.
|
||||
"""
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
self._required_attributes = REQUIRED_ATTRIBUTES[:]
|
||||
self._important_attributes = IMPORTANT_ATTRIBUTES[:]
|
||||
self._deprecated_attributes = {}
|
||||
self._sanity_check(kwargs)
|
||||
|
||||
for opt, val in self._deprecated_attributes.iteritems():
|
||||
LOG.deprecated(_("Attribute '%s' has been deprecated or moved "
|
||||
"to a new section. See new configuration file "
|
||||
"for details."), opt)
|
||||
depr_func = getattr(self, '_process_%s' % opt, None)
|
||||
if depr_func:
|
||||
depr_func(val)
|
||||
|
||||
# If everything went according to plan these two lists should be empty
|
||||
if self._required_attributes:
|
||||
raise exceptions.InvalidClusterConfiguration(
|
||||
invalid_attrs=self._required_attributes)
|
||||
if self._important_attributes:
|
||||
LOG.info(_LI("The following cluster attributes were "
|
||||
"not specified: %s'"), self._important_attributes)
|
||||
# The API client will be explicitly created by users of this class
|
||||
self.api_client = None
|
||||
|
||||
def _sanity_check(self, options):
|
||||
# Iterating this way ensures the conf parameters also
|
||||
# define the structure of this class
|
||||
for arg in cfg.CONF:
|
||||
if arg not in DEPRECATED_ATTRIBUTES:
|
||||
setattr(self, arg, options.get(arg, cfg.CONF.get(arg)))
|
||||
self._process_attribute(arg)
|
||||
elif options.get(arg) is not None:
|
||||
# Process deprecated attributes only if specified
|
||||
self._deprecated_attributes[arg] = options.get(arg)
|
||||
|
||||
def _process_attribute(self, attribute):
|
||||
# Process the attribute only if it's not empty!
|
||||
if getattr(self, attribute, None):
|
||||
if attribute in self._required_attributes:
|
||||
self._required_attributes.remove(attribute)
|
||||
if attribute in self._important_attributes:
|
||||
self._important_attributes.remove(attribute)
|
||||
handler_func = getattr(self, '_process_%s' % attribute, None)
|
||||
if handler_func:
|
||||
handler_func()
|
||||
|
||||
def _process_nsx_controllers(self):
|
||||
# If this raises something is not right, so let it bubble up
|
||||
# TODO(salvatore-orlando): Also validate attribute here
|
||||
for i, ctrl in enumerate(self.nsx_controllers or []):
|
||||
if len(ctrl.split(':')) == 1:
|
||||
self.nsx_controllers[i] = '%s:%s' % (ctrl, DEFAULT_PORT)
|
||||
|
||||
def _process_nvp_controllers(self):
|
||||
self.nsx_controllers = self.nvp_controllers
|
||||
self._process_nsx_controllers()
|
@ -1,147 +0,0 @@
|
||||
# Copyright 2014 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from neutron.common import exceptions as exception
|
||||
from neutron.openstack.common import log
|
||||
from neutron.plugins.vmware.api_client import exception as api_exc
|
||||
from neutron.plugins.vmware.common import exceptions as nsx_exc
|
||||
from neutron import version
|
||||
|
||||
HTTP_GET = "GET"
|
||||
HTTP_POST = "POST"
|
||||
HTTP_DELETE = "DELETE"
|
||||
HTTP_PUT = "PUT"
|
||||
# Prefix to be used for all NSX API calls
|
||||
URI_PREFIX = "/ws.v1"
|
||||
NEUTRON_VERSION = version.version_info.release_string()
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
def _build_uri_path(resource,
|
||||
resource_id=None,
|
||||
parent_resource_id=None,
|
||||
fields=None,
|
||||
relations=None,
|
||||
filters=None,
|
||||
types=None,
|
||||
is_attachment=False,
|
||||
extra_action=None):
|
||||
resources = resource.split('/')
|
||||
res_path = resources[0]
|
||||
if resource_id:
|
||||
res_path += "/%s" % resource_id
|
||||
if len(resources) > 1:
|
||||
# There is also a parent resource to account for in the uri
|
||||
res_path = "%s/%s/%s" % (resources[1],
|
||||
parent_resource_id,
|
||||
res_path)
|
||||
if is_attachment:
|
||||
res_path = "%s/attachment" % res_path
|
||||
elif extra_action:
|
||||
res_path = "%s/%s" % (res_path, extra_action)
|
||||
params = []
|
||||
params.append(fields and "fields=%s" % fields)
|
||||
params.append(relations and "relations=%s" % relations)
|
||||
params.append(types and "types=%s" % types)
|
||||
if filters:
|
||||
sorted_filters = [
|
||||
'%s=%s' % (k, filters[k]) for k in sorted(filters.keys())
|
||||
]
|
||||
params.extend(sorted_filters)
|
||||
uri_path = "%s/%s" % (URI_PREFIX, res_path)
|
||||
non_empty_params = [x for x in params if x is not None]
|
||||
if non_empty_params:
|
||||
query_string = '&'.join(non_empty_params)
|
||||
if query_string:
|
||||
uri_path += "?%s" % query_string
|
||||
return uri_path
|
||||
|
||||
|
||||
def format_exception(etype, e, exception_locals):
|
||||
"""Consistent formatting for exceptions.
|
||||
|
||||
:param etype: a string describing the exception type.
|
||||
:param e: the exception.
|
||||
:param execption_locals: calling context local variable dict.
|
||||
:returns: a formatted string.
|
||||
"""
|
||||
msg = [_("Error. %(type)s exception: %(exc)s.") %
|
||||
{'type': etype, 'exc': e}]
|
||||
l = dict((k, v) for k, v in exception_locals.iteritems()
|
||||
if k != 'request')
|
||||
msg.append(_("locals=[%s]") % str(l))
|
||||
return ' '.join(msg)
|
||||
|
||||
|
||||
def do_request(*args, **kwargs):
|
||||
"""Issue a request to the cluster specified in kwargs.
|
||||
|
||||
:param args: a list of positional arguments.
|
||||
:param kwargs: a list of keyworkds arguments.
|
||||
:returns: the result of the operation loaded into a python
|
||||
object or None.
|
||||
"""
|
||||
cluster = kwargs["cluster"]
|
||||
try:
|
||||
res = cluster.api_client.request(*args)
|
||||
if res:
|
||||
return jsonutils.loads(res)
|
||||
except api_exc.ResourceNotFound:
|
||||
raise exception.NotFound()
|
||||
except api_exc.ReadOnlyMode:
|
||||
raise nsx_exc.MaintenanceInProgress()
|
||||
|
||||
|
||||
def get_single_query_page(path, cluster, page_cursor=None,
|
||||
page_length=1000, neutron_only=True):
|
||||
params = []
|
||||
if page_cursor:
|
||||
params.append("_page_cursor=%s" % page_cursor)
|
||||
params.append("_page_length=%s" % page_length)
|
||||
# NOTE(salv-orlando): On the NSX backend the 'Quantum' tag is still
|
||||
# used for marking Neutron entities in order to preserve compatibility
|
||||
if neutron_only:
|
||||
params.append("tag_scope=quantum")
|
||||
query_params = "&".join(params)
|
||||
path = "%s%s%s" % (path, "&" if (path.find("?") != -1) else "?",
|
||||
query_params)
|
||||
body = do_request(HTTP_GET, path, cluster=cluster)
|
||||
# Result_count won't be returned if _page_cursor is supplied
|
||||
return body['results'], body.get('page_cursor'), body.get('result_count')
|
||||
|
||||
|
||||
def get_all_query_pages(path, cluster):
|
||||
need_more_results = True
|
||||
result_list = []
|
||||
page_cursor = None
|
||||
while need_more_results:
|
||||
results, page_cursor = get_single_query_page(
|
||||
path, cluster, page_cursor)[:2]
|
||||
if not page_cursor:
|
||||
need_more_results = False
|
||||
result_list.extend(results)
|
||||
return result_list
|
||||
|
||||
|
||||
def mk_body(**kwargs):
|
||||
"""Convenience function creates and dumps dictionary to string.
|
||||
|
||||
:param kwargs: the key/value pirs to be dumped into a json string.
|
||||
:returns: a json string.
|
||||
"""
|
||||
return jsonutils.dumps(kwargs, ensure_ascii=False)
|
@ -1,212 +0,0 @@
|
||||
# Copyright 2014 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from neutron.openstack.common import log
|
||||
from neutron.plugins.vmware.api_client import exception as api_exc
|
||||
from neutron.plugins.vmware.common import exceptions as nsx_exc
|
||||
from neutron.plugins.vmware.common import utils
|
||||
from neutron.plugins.vmware import nsxlib
|
||||
from neutron.plugins.vmware.nsxlib import switch
|
||||
|
||||
HTTP_GET = "GET"
|
||||
HTTP_POST = "POST"
|
||||
HTTP_DELETE = "DELETE"
|
||||
HTTP_PUT = "PUT"
|
||||
|
||||
GWSERVICE_RESOURCE = "gateway-service"
|
||||
TRANSPORTNODE_RESOURCE = "transport-node"
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
def create_l2_gw_service(cluster, tenant_id, display_name, devices):
|
||||
"""Create a NSX Layer-2 Network Gateway Service.
|
||||
|
||||
:param cluster: The target NSX cluster
|
||||
:param tenant_id: Identifier of the Openstack tenant for which
|
||||
the gateway service.
|
||||
:param display_name: Descriptive name of this gateway service
|
||||
:param devices: List of transport node uuids (and network
|
||||
interfaces on them) to use for the network gateway service
|
||||
:raise NsxApiException: if there is a problem while communicating
|
||||
with the NSX controller
|
||||
"""
|
||||
# NOTE(salvatore-orlando): This is a little confusing, but device_id in
|
||||
# NSX is actually the identifier a physical interface on the gateway
|
||||
# device, which in the Neutron API is referred as interface_name
|
||||
gateways = [{"transport_node_uuid": device['id'],
|
||||
"device_id": device['interface_name'],
|
||||
"type": "L2Gateway"} for device in devices]
|
||||
gwservice_obj = {
|
||||
"display_name": utils.check_and_truncate(display_name),
|
||||
"tags": utils.get_tags(os_tid=tenant_id),
|
||||
"gateways": gateways,
|
||||
"type": "L2GatewayServiceConfig"
|
||||
}
|
||||
return nsxlib.do_request(
|
||||
HTTP_POST, nsxlib._build_uri_path(GWSERVICE_RESOURCE),
|
||||
jsonutils.dumps(gwservice_obj), cluster=cluster)
|
||||
|
||||
|
||||
def plug_l2_gw_service(cluster, lswitch_id, lport_id,
|
||||
gateway_id, vlan_id=None):
|
||||
"""Plug a Layer-2 Gateway Attachment object in a logical port."""
|
||||
att_obj = {'type': 'L2GatewayAttachment',
|
||||
'l2_gateway_service_uuid': gateway_id}
|
||||
if vlan_id:
|
||||
att_obj['vlan_id'] = vlan_id
|
||||
return switch.plug_interface(cluster, lswitch_id, lport_id, att_obj)
|
||||
|
||||
|
||||
def get_l2_gw_service(cluster, gateway_id):
|
||||
return nsxlib.do_request(
|
||||
HTTP_GET, nsxlib._build_uri_path(GWSERVICE_RESOURCE,
|
||||
resource_id=gateway_id),
|
||||
cluster=cluster)
|
||||
|
||||
|
||||
def get_l2_gw_services(cluster, tenant_id=None,
|
||||
fields=None, filters=None):
|
||||
actual_filters = dict(filters or {})
|
||||
if tenant_id:
|
||||
actual_filters['tag'] = tenant_id
|
||||
actual_filters['tag_scope'] = 'os_tid'
|
||||
return nsxlib.get_all_query_pages(
|
||||
nsxlib._build_uri_path(GWSERVICE_RESOURCE,
|
||||
filters=actual_filters),
|
||||
cluster)
|
||||
|
||||
|
||||
def update_l2_gw_service(cluster, gateway_id, display_name):
|
||||
# TODO(salvatore-orlando): Allow updates for gateways too
|
||||
gwservice_obj = get_l2_gw_service(cluster, gateway_id)
|
||||
if not display_name:
|
||||
# Nothing to update
|
||||
return gwservice_obj
|
||||
gwservice_obj["display_name"] = utils.check_and_truncate(display_name)
|
||||
return nsxlib.do_request(HTTP_PUT,
|
||||
nsxlib._build_uri_path(GWSERVICE_RESOURCE,
|
||||
resource_id=gateway_id),
|
||||
jsonutils.dumps(gwservice_obj), cluster=cluster)
|
||||
|
||||
|
||||
def delete_l2_gw_service(cluster, gateway_id):
|
||||
nsxlib.do_request(HTTP_DELETE,
|
||||
nsxlib._build_uri_path(GWSERVICE_RESOURCE,
|
||||
resource_id=gateway_id),
|
||||
cluster=cluster)
|
||||
|
||||
|
||||
def _build_gateway_device_body(tenant_id, display_name, neutron_id,
|
||||
connector_type, connector_ip,
|
||||
client_certificate, tz_uuid):
|
||||
|
||||
connector_type_mappings = {
|
||||
utils.NetworkTypes.STT: "STTConnector",
|
||||
utils.NetworkTypes.GRE: "GREConnector",
|
||||
utils.NetworkTypes.BRIDGE: "BridgeConnector",
|
||||
'ipsec%s' % utils.NetworkTypes.STT: "IPsecSTT",
|
||||
'ipsec%s' % utils.NetworkTypes.GRE: "IPsecGRE"}
|
||||
nsx_connector_type = connector_type_mappings.get(connector_type)
|
||||
body = {"display_name": utils.check_and_truncate(display_name),
|
||||
"tags": utils.get_tags(os_tid=tenant_id,
|
||||
q_gw_dev_id=neutron_id),
|
||||
"admin_status_enabled": True}
|
||||
|
||||
if connector_ip and nsx_connector_type:
|
||||
body["transport_connectors"] = [
|
||||
{"transport_zone_uuid": tz_uuid,
|
||||
"ip_address": connector_ip,
|
||||
"type": nsx_connector_type}]
|
||||
|
||||
if client_certificate:
|
||||
body["credential"] = {"client_certificate":
|
||||
{"pem_encoded": client_certificate},
|
||||
"type": "SecurityCertificateCredential"}
|
||||
return body
|
||||
|
||||
|
||||
def create_gateway_device(cluster, tenant_id, display_name, neutron_id,
|
||||
tz_uuid, connector_type, connector_ip,
|
||||
client_certificate):
|
||||
body = _build_gateway_device_body(tenant_id, display_name, neutron_id,
|
||||
connector_type, connector_ip,
|
||||
client_certificate, tz_uuid)
|
||||
try:
|
||||
return nsxlib.do_request(
|
||||
HTTP_POST, nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE),
|
||||
jsonutils.dumps(body, sort_keys=True), cluster=cluster)
|
||||
except api_exc.InvalidSecurityCertificate:
|
||||
raise nsx_exc.InvalidSecurityCertificate()
|
||||
|
||||
|
||||
def update_gateway_device(cluster, gateway_id, tenant_id,
|
||||
display_name, neutron_id,
|
||||
tz_uuid, connector_type, connector_ip,
|
||||
client_certificate):
|
||||
body = _build_gateway_device_body(tenant_id, display_name, neutron_id,
|
||||
connector_type, connector_ip,
|
||||
client_certificate, tz_uuid)
|
||||
try:
|
||||
return nsxlib.do_request(
|
||||
HTTP_PUT,
|
||||
nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE,
|
||||
resource_id=gateway_id),
|
||||
jsonutils.dumps(body, sort_keys=True), cluster=cluster)
|
||||
except api_exc.InvalidSecurityCertificate:
|
||||
raise nsx_exc.InvalidSecurityCertificate()
|
||||
|
||||
|
||||
def delete_gateway_device(cluster, device_uuid):
|
||||
return nsxlib.do_request(HTTP_DELETE,
|
||||
nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE,
|
||||
device_uuid),
|
||||
cluster=cluster)
|
||||
|
||||
|
||||
def get_gateway_device_status(cluster, device_uuid):
|
||||
status_res = nsxlib.do_request(HTTP_GET,
|
||||
nsxlib._build_uri_path(
|
||||
TRANSPORTNODE_RESOURCE,
|
||||
device_uuid,
|
||||
extra_action='status'),
|
||||
cluster=cluster)
|
||||
# Returns the connection status
|
||||
return status_res['connection']['connected']
|
||||
|
||||
|
||||
def get_gateway_devices_status(cluster, tenant_id=None):
|
||||
if tenant_id:
|
||||
gw_device_query_path = nsxlib._build_uri_path(
|
||||
TRANSPORTNODE_RESOURCE,
|
||||
fields="uuid,tags",
|
||||
relations="TransportNodeStatus",
|
||||
filters={'tag': tenant_id,
|
||||
'tag_scope': 'os_tid'})
|
||||
else:
|
||||
gw_device_query_path = nsxlib._build_uri_path(
|
||||
TRANSPORTNODE_RESOURCE,
|
||||
fields="uuid,tags",
|
||||
relations="TransportNodeStatus")
|
||||
|
||||
response = nsxlib.get_all_query_pages(gw_device_query_path, cluster)
|
||||
results = {}
|
||||
for item in response:
|
||||
results[item['uuid']] = (item['_relations']['TransportNodeStatus']
|
||||
['connection']['connected'])
|
||||
return results
|
@ -1,269 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from neutron.common import exceptions as exception
|
||||
from neutron.openstack.common import log
|
||||
from neutron.plugins.vmware.api_client import exception as api_exc
|
||||
from neutron.plugins.vmware.common import exceptions as nsx_exc
|
||||
from neutron.plugins.vmware.common import utils
|
||||
from neutron.plugins.vmware import nsxlib
|
||||
|
||||
HTTP_GET = "GET"
|
||||
HTTP_POST = "POST"
|
||||
HTTP_DELETE = "DELETE"
|
||||
HTTP_PUT = "PUT"
|
||||
|
||||
SERVICECLUSTER_RESOURCE = "edge-cluster"
|
||||
LSERVICESNODE_RESOURCE = "lservices-node"
|
||||
LSERVICESNODEPORT_RESOURCE = "lport/%s" % LSERVICESNODE_RESOURCE
|
||||
SUPPORTED_METADATA_OPTIONS = ['metadata_proxy_shared_secret']
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
def service_cluster_exists(cluster, svc_cluster_id):
|
||||
exists = False
|
||||
try:
|
||||
exists = (
|
||||
svc_cluster_id and
|
||||
nsxlib.do_request(HTTP_GET,
|
||||
nsxlib._build_uri_path(
|
||||
SERVICECLUSTER_RESOURCE,
|
||||
resource_id=svc_cluster_id),
|
||||
cluster=cluster) is not None)
|
||||
except exception.NotFound:
|
||||
pass
|
||||
return exists
|
||||
|
||||
|
||||
def lsn_for_network_create(cluster, network_id):
|
||||
lsn_obj = {
|
||||
"edge_cluster_uuid": cluster.default_service_cluster_uuid,
|
||||
"tags": utils.get_tags(n_network_id=network_id)
|
||||
}
|
||||
return nsxlib.do_request(HTTP_POST,
|
||||
nsxlib._build_uri_path(LSERVICESNODE_RESOURCE),
|
||||
jsonutils.dumps(lsn_obj),
|
||||
cluster=cluster)["uuid"]
|
||||
|
||||
|
||||
def lsn_for_network_get(cluster, network_id):
|
||||
filters = {"tag": network_id, "tag_scope": "n_network_id"}
|
||||
results = nsxlib.do_request(HTTP_GET,
|
||||
nsxlib._build_uri_path(LSERVICESNODE_RESOURCE,
|
||||
fields="uuid",
|
||||
filters=filters),
|
||||
cluster=cluster)['results']
|
||||
if not results:
|
||||
raise exception.NotFound()
|
||||
elif len(results) == 1:
|
||||
return results[0]['uuid']
|
||||
|
||||
|
||||
def lsn_delete(cluster, lsn_id):
|
||||
nsxlib.do_request(HTTP_DELETE,
|
||||
nsxlib._build_uri_path(LSERVICESNODE_RESOURCE,
|
||||
resource_id=lsn_id),
|
||||
cluster=cluster)
|
||||
|
||||
|
||||
def lsn_port_host_entries_update(
|
||||
cluster, lsn_id, lsn_port_id, conf, hosts_data):
|
||||
hosts_obj = {'hosts': hosts_data}
|
||||
nsxlib.do_request(HTTP_PUT,
|
||||
nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE,
|
||||
parent_resource_id=lsn_id,
|
||||
resource_id=lsn_port_id,
|
||||
extra_action=conf),
|
||||
jsonutils.dumps(hosts_obj),
|
||||
cluster=cluster)
|
||||
|
||||
|
||||
def lsn_port_create(cluster, lsn_id, port_data):
|
||||
port_obj = {
|
||||
"ip_address": port_data["ip_address"],
|
||||
"mac_address": port_data["mac_address"],
|
||||
"tags": utils.get_tags(n_mac_address=port_data["mac_address"],
|
||||
n_subnet_id=port_data["subnet_id"]),
|
||||
"type": "LogicalServicesNodePortConfig",
|
||||
}
|
||||
return nsxlib.do_request(HTTP_POST,
|
||||
nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE,
|
||||
parent_resource_id=lsn_id),
|
||||
jsonutils.dumps(port_obj),
|
||||
cluster=cluster)["uuid"]
|
||||
|
||||
|
||||
def lsn_port_delete(cluster, lsn_id, lsn_port_id):
|
||||
return nsxlib.do_request(HTTP_DELETE,
|
||||
nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE,
|
||||
parent_resource_id=lsn_id,
|
||||
resource_id=lsn_port_id),
|
||||
cluster=cluster)
|
||||
|
||||
|
||||
def _lsn_port_get(cluster, lsn_id, filters):
|
||||
results = nsxlib.do_request(HTTP_GET,
|
||||
nsxlib._build_uri_path(
|
||||
LSERVICESNODEPORT_RESOURCE,
|
||||
parent_resource_id=lsn_id,
|
||||
fields="uuid",
|
||||
filters=filters),
|
||||
cluster=cluster)['results']
|
||||
if not results:
|
||||
raise exception.NotFound()
|
||||
elif len(results) == 1:
|
||||
return results[0]['uuid']
|
||||
|
||||
|
||||
def lsn_port_by_mac_get(cluster, lsn_id, mac_address):
|
||||
filters = {"tag": mac_address, "tag_scope": "n_mac_address"}
|
||||
return _lsn_port_get(cluster, lsn_id, filters)
|
||||
|
||||
|
||||
def lsn_port_by_subnet_get(cluster, lsn_id, subnet_id):
|
||||
filters = {"tag": subnet_id, "tag_scope": "n_subnet_id"}
|
||||
return _lsn_port_get(cluster, lsn_id, filters)
|
||||
|
||||
|
||||
def lsn_port_info_get(cluster, lsn_id, lsn_port_id):
|
||||
result = nsxlib.do_request(HTTP_GET,
|
||||
nsxlib._build_uri_path(
|
||||
LSERVICESNODEPORT_RESOURCE,
|
||||
parent_resource_id=lsn_id,
|
||||
resource_id=lsn_port_id),
|
||||
cluster=cluster)
|
||||
for tag in result['tags']:
|
||||
if tag['scope'] == 'n_subnet_id':
|
||||
result['subnet_id'] = tag['tag']
|
||||
break
|
||||
return result
|
||||
|
||||
|
||||
def lsn_port_plug_network(cluster, lsn_id, lsn_port_id, lswitch_port_id):
|
||||
patch_obj = {
|
||||
"type": "PatchAttachment",
|
||||
"peer_port_uuid": lswitch_port_id
|
||||
}
|
||||
try:
|
||||
nsxlib.do_request(HTTP_PUT,
|
||||
nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE,
|
||||
parent_resource_id=lsn_id,
|
||||
resource_id=lsn_port_id,
|
||||
is_attachment=True),
|
||||
jsonutils.dumps(patch_obj),
|
||||
cluster=cluster)
|
||||
except api_exc.Conflict:
|
||||
# This restriction might be lifted at some point
|
||||
msg = (_("Attempt to plug Logical Services Node %(lsn)s into "
|
||||
"network with port %(port)s failed. PatchAttachment "
|
||||
"already exists with another port") %
|
||||
{'lsn': lsn_id, 'port': lswitch_port_id})
|
||||
LOG.exception(msg)
|
||||
raise nsx_exc.LsnConfigurationConflict(lsn_id=lsn_id)
|
||||
|
||||
|
||||
def _lsn_configure_action(
|
||||
cluster, lsn_id, action, is_enabled, obj):
|
||||
lsn_obj = {"enabled": is_enabled}
|
||||
lsn_obj.update(obj)
|
||||
nsxlib.do_request(HTTP_PUT,
|
||||
nsxlib._build_uri_path(LSERVICESNODE_RESOURCE,
|
||||
resource_id=lsn_id,
|
||||
extra_action=action),
|
||||
jsonutils.dumps(lsn_obj),
|
||||
cluster=cluster)
|
||||
|
||||
|
||||
def _lsn_port_configure_action(
|
||||
cluster, lsn_id, lsn_port_id, action, is_enabled, obj):
|
||||
nsxlib.do_request(HTTP_PUT,
|
||||
nsxlib._build_uri_path(LSERVICESNODE_RESOURCE,
|
||||
resource_id=lsn_id,
|
||||
extra_action=action),
|
||||
jsonutils.dumps({"enabled": is_enabled}),
|
||||
cluster=cluster)
|
||||
nsxlib.do_request(HTTP_PUT,
|
||||
nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE,
|
||||
parent_resource_id=lsn_id,
|
||||
resource_id=lsn_port_id,
|
||||
extra_action=action),
|
||||
jsonutils.dumps(obj),
|
||||
cluster=cluster)
|
||||
|
||||
|
||||
def _get_opts(name, value):
|
||||
return {"name": name, "value": str(value)}
|
||||
|
||||
|
||||
def lsn_port_dhcp_configure(
|
||||
cluster, lsn_id, lsn_port_id, is_enabled=True, dhcp_options=None):
|
||||
dhcp_options = dhcp_options or {}
|
||||
opts = [_get_opts(key, val) for key, val in dhcp_options.iteritems()]
|
||||
dhcp_obj = {'options': opts}
|
||||
_lsn_port_configure_action(
|
||||
cluster, lsn_id, lsn_port_id, 'dhcp', is_enabled, dhcp_obj)
|
||||
|
||||
|
||||
def lsn_metadata_configure(
|
||||
cluster, lsn_id, is_enabled=True, metadata_info=None):
|
||||
meta_obj = {
|
||||
'metadata_server_ip': metadata_info['metadata_server_ip'],
|
||||
'metadata_server_port': metadata_info['metadata_server_port'],
|
||||
}
|
||||
if metadata_info:
|
||||
opts = [
|
||||
_get_opts(opt, metadata_info[opt])
|
||||
for opt in SUPPORTED_METADATA_OPTIONS
|
||||
if metadata_info.get(opt)
|
||||
]
|
||||
if opts:
|
||||
meta_obj["options"] = opts
|
||||
_lsn_configure_action(
|
||||
cluster, lsn_id, 'metadata-proxy', is_enabled, meta_obj)
|
||||
|
||||
|
||||
def _lsn_port_host_action(
|
||||
cluster, lsn_id, lsn_port_id, host_obj, extra_action, action):
|
||||
nsxlib.do_request(HTTP_POST,
|
||||
nsxlib._build_uri_path(LSERVICESNODEPORT_RESOURCE,
|
||||
parent_resource_id=lsn_id,
|
||||
resource_id=lsn_port_id,
|
||||
extra_action=extra_action,
|
||||
filters={"action": action}),
|
||||
jsonutils.dumps(host_obj),
|
||||
cluster=cluster)
|
||||
|
||||
|
||||
def lsn_port_dhcp_host_add(cluster, lsn_id, lsn_port_id, host_data):
|
||||
_lsn_port_host_action(
|
||||
cluster, lsn_id, lsn_port_id, host_data, 'dhcp', 'add_host')
|
||||
|
||||
|
||||
def lsn_port_dhcp_host_remove(cluster, lsn_id, lsn_port_id, host_data):
|
||||
_lsn_port_host_action(
|
||||
cluster, lsn_id, lsn_port_id, host_data, 'dhcp', 'remove_host')
|
||||
|
||||
|
||||
def lsn_port_metadata_host_add(cluster, lsn_id, lsn_port_id, host_data):
|
||||
_lsn_port_host_action(
|
||||
cluster, lsn_id, lsn_port_id, host_data, 'metadata-proxy', 'add_host')
|
||||
|
||||
|
||||
def lsn_port_metadata_host_remove(cluster, lsn_id, lsn_port_id, host_data):
|
||||
_lsn_port_host_action(cluster, lsn_id, lsn_port_id,
|
||||
host_data, 'metadata-proxy', 'remove_host')
|
@ -1,72 +0,0 @@
|
||||
# Copyright 2014 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import excutils
|
||||
|
||||
from neutron.api.v2 import attributes as attr
|
||||
from neutron.common import exceptions as exception
|
||||
from neutron.openstack.common import log
|
||||
from neutron.plugins.vmware.api_client import exception as api_exc
|
||||
from neutron.plugins.vmware.common import utils
|
||||
from neutron.plugins.vmware import nsxlib
|
||||
|
||||
HTTP_POST = "POST"
|
||||
HTTP_DELETE = "DELETE"
|
||||
|
||||
LQUEUE_RESOURCE = "lqueue"
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
def create_lqueue(cluster, queue_data):
|
||||
params = {
|
||||
'name': 'display_name',
|
||||
'qos_marking': 'qos_marking',
|
||||
'min': 'min_bandwidth_rate',
|
||||
'max': 'max_bandwidth_rate',
|
||||
'dscp': 'dscp'
|
||||
}
|
||||
queue_obj = dict(
|
||||
(nsx_name, queue_data.get(api_name))
|
||||
for api_name, nsx_name in params.iteritems()
|
||||
if attr.is_attr_set(queue_data.get(api_name))
|
||||
)
|
||||
if 'display_name' in queue_obj:
|
||||
queue_obj['display_name'] = utils.check_and_truncate(
|
||||
queue_obj['display_name'])
|
||||
|
||||
queue_obj['tags'] = utils.get_tags()
|
||||
try:
|
||||
return nsxlib.do_request(HTTP_POST,
|
||||
nsxlib._build_uri_path(LQUEUE_RESOURCE),
|
||||
jsonutils.dumps(queue_obj),
|
||||
cluster=cluster)['uuid']
|
||||
except api_exc.NsxApiException:
|
||||
# FIXME(salv-orlando): This should not raise NeutronException
|
||||
with excutils.save_and_reraise_exception():
|
||||
raise exception.NeutronException()
|
||||
|
||||
|
||||
def delete_lqueue(cluster, queue_id):
    """Remove a logical queue from the NSX backend."""
    try:
        uri = nsxlib._build_uri_path(LQUEUE_RESOURCE, resource_id=queue_id)
        nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster)
    except Exception:
        # FIXME(salv-orlando): This should not raise NeutronException
        with excutils.save_and_reraise_exception():
            raise exception.NeutronException()
|
@ -1,707 +0,0 @@
|
||||
# Copyright 2014 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import excutils
|
||||
|
||||
from neutron.common import exceptions as exception
|
||||
from neutron.i18n import _LE, _LI, _LW
|
||||
from neutron.openstack.common import log
|
||||
from neutron.plugins.vmware.api_client import exception as api_exc
|
||||
from neutron.plugins.vmware.common import exceptions as nsx_exc
|
||||
from neutron.plugins.vmware.common import utils
|
||||
from neutron.plugins.vmware import nsxlib
|
||||
from neutron.plugins.vmware.nsxlib import switch
|
||||
from neutron.plugins.vmware.nsxlib import versioning
|
||||
|
||||
# @versioning.versioned decorator makes the apparent function body
|
||||
# totally unrelated to the real function. This confuses pylint :(
|
||||
# pylint: disable=assignment-from-no-return
|
||||
|
||||
# HTTP verbs used against the NSX API
HTTP_GET = "GET"
HTTP_POST = "POST"
HTTP_DELETE = "DELETE"
HTTP_PUT = "PUT"
||||
|
||||
# URI fragments for the logical-router related NSX resources.
# (The duplicated LROUTER_RESOURCE assignment has been removed.)
LROUTER_RESOURCE = "lrouter"
LROUTERPORT_RESOURCE = "lport/%s" % LROUTER_RESOURCE
LROUTERRIB_RESOURCE = "rib/%s" % LROUTER_RESOURCE
LROUTERNAT_RESOURCE = "nat/lrouter"
# Constants for NAT rules
MATCH_KEYS = ["destination_ip_addresses", "destination_port_max",
              "destination_port_min", "source_ip_addresses",
              "source_port_max", "source_port_min", "protocol"]
|
||||
|
||||
# Module-level logger
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
def _prepare_lrouter_body(name, neutron_router_id, tenant_id,
                          router_type, distributed=None, **kwargs):
    """Build the JSON-serializable body for a logical router request."""
    routing_config = {"type": router_type}
    # Extra routing options (e.g. default_route_next_hop) are folded in
    routing_config.update(kwargs)
    body = {
        "display_name": utils.check_and_truncate(name),
        "tags": utils.get_tags(os_tid=tenant_id,
                               q_router_id=neutron_router_id),
        "routing_config": routing_config,
        "type": "LogicalRouterConfig",
        "replication_mode": cfg.CONF.NSX.replication_mode,
    }
    # add the distributed key only if not None (ie: True or False)
    if distributed is not None:
        body['distributed'] = distributed
    return body
|
||||
|
||||
|
||||
def _create_implicit_routing_lrouter(cluster, neutron_router_id, tenant_id,
                                     display_name, nexthop, distributed=None):
    """POST an implicit-routing logical router to the NSX backend."""
    default_next_hop = {
        "gateway_ip_address": nexthop,
        "type": "RouterNextHop"
    }
    body = _prepare_lrouter_body(
        display_name, neutron_router_id, tenant_id,
        "SingleDefaultRouteImplicitRoutingConfig",
        distributed=distributed,
        default_route_next_hop=default_next_hop)
    return nsxlib.do_request(HTTP_POST,
                             nsxlib._build_uri_path(LROUTER_RESOURCE),
                             jsonutils.dumps(body), cluster=cluster)
|
||||
|
||||
|
||||
def create_implicit_routing_lrouter(cluster, neutron_router_id, tenant_id,
                                    display_name, nexthop):
    """Create a NSX logical router on the specified cluster.

    :param cluster: The target NSX cluster
    :param neutron_router_id: Identifier of the Neutron router for which
        the NSX logical router is being created
    :param tenant_id: Identifier of the Openstack tenant for which
    the logical router is being created
    :param display_name: Descriptive name of this logical router
    :param nexthop: External gateway IP address for the logical router
    :raise NsxApiException: if there is a problem while communicating
    with the NSX controller
    """
    return _create_implicit_routing_lrouter(
        cluster, neutron_router_id, tenant_id, display_name, nexthop)
|
||||
|
||||
|
||||
def create_implicit_routing_lrouter_with_distribution(
    cluster, neutron_router_id, tenant_id, display_name,
    nexthop, distributed=None):
    """Create a NSX logical router on the specified cluster.

    This function also allows for creating distributed lrouters
    :param cluster: The target NSX cluster
    :param neutron_router_id: Identifier of the Neutron router for which
        the NSX logical router is being created
    :param tenant_id: Identifier of the Openstack tenant for which
    the logical router is being created
    :param display_name: Descriptive name of this logical router
    :param nexthop: External gateway IP address for the logical router
    :param distributed: True for distributed logical routers
    :raise NsxApiException: if there is a problem while communicating
    with the NSX controller
    """
    return _create_implicit_routing_lrouter(
        cluster, neutron_router_id, tenant_id,
        display_name, nexthop, distributed)
|
||||
|
||||
|
||||
def create_explicit_routing_lrouter(cluster, neutron_router_id, tenant_id,
                                    display_name, nexthop, distributed=None):
    """Create an explicit-routing lrouter and seed its default route."""
    body = _prepare_lrouter_body(
        display_name, neutron_router_id, tenant_id,
        "RoutingTableRoutingConfig", distributed=distributed)
    router = nsxlib.do_request(HTTP_POST,
                               nsxlib._build_uri_path(LROUTER_RESOURCE),
                               jsonutils.dumps(body), cluster=cluster)
    # Seed the routing table with the default gateway route
    create_explicit_route_lrouter(
        cluster, router['uuid'],
        {'prefix': '0.0.0.0/0', 'next_hop_ip': nexthop})
    return router
|
||||
|
||||
|
||||
def delete_lrouter(cluster, lrouter_id):
    """Delete the NSX logical router identified by lrouter_id."""
    uri = nsxlib._build_uri_path(LROUTER_RESOURCE, resource_id=lrouter_id)
    nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster)
|
||||
|
||||
|
||||
def get_lrouter(cluster, lrouter_id):
    """Fetch a logical router (with its status relation) from NSX."""
    uri = nsxlib._build_uri_path(LROUTER_RESOURCE,
                                 resource_id=lrouter_id,
                                 relations='LogicalRouterStatus')
    return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)
|
||||
|
||||
|
||||
def query_lrouters(cluster, fields=None, filters=None):
    """Return all logical routers matching filters, across result pages."""
    uri = nsxlib._build_uri_path(LROUTER_RESOURCE,
                                 fields=fields,
                                 relations='LogicalRouterStatus',
                                 filters=filters)
    return nsxlib.get_all_query_pages(uri, cluster)
|
||||
|
||||
|
||||
def get_lrouters(cluster, tenant_id, fields=None, filters=None):
    """List logical routers, optionally scoped to a tenant via os_tid tag."""
    # FIXME(salv-orlando): Fields parameter is ignored in this routine
    tag_filters = dict(filters) if filters else {}
    if tenant_id:
        tag_filters['tag'] = tenant_id
        tag_filters['tag_scope'] = 'os_tid'
    return query_lrouters(cluster,
                          "uuid,display_name,fabric_status,tags",
                          tag_filters)
|
||||
|
||||
|
||||
def update_implicit_routing_lrouter(cluster, r_id, display_name, nexthop):
    """Update name and/or default gateway of an implicit-routing lrouter.

    Reads the current router body, patches it in place and PUTs it back.
    Either argument may be falsy, in which case it is left unchanged.
    """
    lrouter_obj = get_lrouter(cluster, r_id)
    if not display_name and not nexthop:
        # Nothing to update
        return lrouter_obj
    # It seems that this is faster than the doing an if on display_name
    lrouter_obj["display_name"] = (utils.check_and_truncate(display_name) or
                                   lrouter_obj["display_name"])
    if nexthop:
        nh_element = lrouter_obj["routing_config"].get(
            "default_route_next_hop")
        if nh_element:
            nh_element["gateway_ip_address"] = nexthop
    return nsxlib.do_request(HTTP_PUT,
                             nsxlib._build_uri_path(LROUTER_RESOURCE,
                                                    resource_id=r_id),
                             jsonutils.dumps(lrouter_obj),
                             cluster=cluster)
|
||||
|
||||
|
||||
def get_explicit_routes_lrouter(cluster, router_id, protocol_type='static'):
    """Return the RIB entries of the given protocol for a logical router."""
    uri = nsxlib._build_uri_path(LROUTERRIB_RESOURCE,
                                 filters={'protocol': protocol_type},
                                 fields="*",
                                 parent_resource_id=router_id)
    return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)['results']
|
||||
|
||||
|
||||
def delete_explicit_route_lrouter(cluster, router_id, route_id):
    """Remove a single static route from a logical router."""
    uri = nsxlib._build_uri_path(LROUTERRIB_RESOURCE,
                                 resource_id=route_id,
                                 parent_resource_id=router_id)
    nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster)
|
||||
|
||||
|
||||
def create_explicit_route_lrouter(cluster, router_id, route):
    """Add a static route to a logical router; return the new route's uuid.

    Accepts both the Neutron-style keys (nexthop/destination) and the
    NSX-style ones (next_hop_ip/prefix).
    """
    route_body = {
        "action": "accept",
        "next_hop_ip": route.get("nexthop") or route.get("next_hop_ip"),
        "prefix": route.get("destination") or route.get("prefix"),
        "protocol": "static"
    }
    uri = nsxlib._build_uri_path(LROUTERRIB_RESOURCE,
                                 parent_resource_id=router_id)
    return nsxlib.do_request(HTTP_POST, uri, jsonutils.dumps(route_body),
                             cluster=cluster)['uuid']
|
||||
|
||||
|
||||
def update_explicit_routes_lrouter(cluster, router_id, routes):
    """Replace all non-default static routes of a logical router.

    Deletes the existing NSX routes and creates the ones in *routes*.
    If the NSX API fails midway, the deletions/additions performed so far
    are rolled back before the exception is re-raised.

    :returns: the list of NSX routes that existed before the update
    """
    # Update in bulk: delete them all, and add the ones specified
    # but keep track of what is been modified to allow roll-backs
    # in case of failures
    nsx_routes = get_explicit_routes_lrouter(cluster, router_id)
    try:
        deleted_routes = []
        added_routes = []
        # omit the default route (0.0.0.0/0) from the processing;
        # this must be handled through the nexthop for the router
        for route in nsx_routes:
            prefix = route.get("destination") or route.get("prefix")
            if prefix != '0.0.0.0/0':
                delete_explicit_route_lrouter(cluster,
                                              router_id,
                                              route['uuid'])
                deleted_routes.append(route)
        for route in routes:
            prefix = route.get("destination") or route.get("prefix")
            if prefix != '0.0.0.0/0':
                uuid = create_explicit_route_lrouter(cluster,
                                                     router_id, route)
                added_routes.append(uuid)
    except api_exc.NsxApiException:
        LOG.exception(_LE('Cannot update NSX routes %(routes)s for '
                          'router %(router_id)s'),
                      {'routes': routes, 'router_id': router_id})
        # Roll back to keep NSX in consistent state
        with excutils.save_and_reraise_exception():
            if nsx_routes:
                if deleted_routes:
                    for route in deleted_routes:
                        create_explicit_route_lrouter(cluster,
                                                      router_id, route)
                if added_routes:
                    for route_id in added_routes:
                        delete_explicit_route_lrouter(cluster,
                                                      router_id, route_id)
    return nsx_routes
|
||||
|
||||
|
||||
def get_default_route_explicit_routing_lrouter_v33(cluster, router_id):
    """Fetch the default (0.0.0.0/0) static route via a server-side filter."""
    uri = nsxlib._build_uri_path(
        LROUTERRIB_RESOURCE,
        filters={"protocol": "static", "prefix": "0.0.0.0/0"},
        fields="*",
        parent_resource_id=router_id)
    return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)["results"][0]
|
||||
|
||||
|
||||
def get_default_route_explicit_routing_lrouter_v32(cluster, router_id):
    """Fetch the default route by scanning the whole RIB.

    NSX 3.2 does not support querying routes by prefix, so every static
    route is retrieved and filtered client-side.
    """
    for route in get_explicit_routes_lrouter(cluster, router_id):
        if route['prefix'] == '0.0.0.0/0':
            return route
|
||||
|
||||
|
||||
def update_default_gw_explicit_routing_lrouter(cluster, router_id, next_hop):
    """Point the 0.0.0.0/0 static route of a lrouter at a new next hop.

    No request is issued when the default route already uses *next_hop*.
    """
    default_route = get_default_route_explicit_routing_lrouter(cluster,
                                                               router_id)
    if next_hop != default_route["next_hop_ip"]:
        new_default_route = {"action": "accept",
                             "next_hop_ip": next_hop,
                             "prefix": "0.0.0.0/0",
                             "protocol": "static"}
        nsxlib.do_request(HTTP_PUT,
                          nsxlib._build_uri_path(
                              LROUTERRIB_RESOURCE,
                              resource_id=default_route['uuid'],
                              parent_resource_id=router_id),
                          jsonutils.dumps(new_default_route),
                          cluster=cluster)
|
||||
|
||||
|
||||
def update_explicit_routing_lrouter(cluster, router_id,
                                    display_name, next_hop, routes=None):
    """Update an explicit-routing lrouter's name, gateway and routes.

    :returns: the pre-update NSX routes when *routes* is given, else None
    """
    update_implicit_routing_lrouter(cluster, router_id, display_name, next_hop)
    if next_hop:
        # The default gateway of an explicit-routing lrouter lives in its
        # routing table, so it must be updated separately
        update_default_gw_explicit_routing_lrouter(cluster,
                                                   router_id, next_hop)
    if routes is not None:
        return update_explicit_routes_lrouter(cluster, router_id, routes)
|
||||
|
||||
|
||||
def query_lrouter_lports(cluster, lr_uuid, fields="*",
                         filters=None, relations=None):
    """List the logical ports attached to the given logical router."""
    return nsxlib.do_request(
        HTTP_GET,
        nsxlib._build_uri_path(LROUTERPORT_RESOURCE,
                               parent_resource_id=lr_uuid,
                               fields=fields, filters=filters,
                               relations=relations),
        cluster=cluster)['results']
|
||||
|
||||
|
||||
def create_router_lport(cluster, lrouter_uuid, tenant_id, neutron_port_id,
                        display_name, admin_status_enabled, ip_addresses,
                        mac_address=None):
    """Creates a logical port on the assigned logical router.

    :param mac_address: optional; omitted when creating the fake external
        gateway port, which has no MAC address
    :returns: the NSX port body, including the generated 'uuid'
    """
    lport_obj = dict(
        admin_status_enabled=admin_status_enabled,
        display_name=display_name,
        tags=utils.get_tags(os_tid=tenant_id, q_port_id=neutron_port_id),
        ip_addresses=ip_addresses,
        type="LogicalRouterPortConfig"
    )
    # Only add the mac_address to lport_obj if present. This is because
    # when creating the fake_ext_gw there is no mac_address present.
    if mac_address:
        lport_obj['mac_address'] = mac_address
    path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE,
                                  parent_resource_id=lrouter_uuid)
    result = nsxlib.do_request(HTTP_POST, path, jsonutils.dumps(lport_obj),
                               cluster=cluster)

    LOG.debug("Created logical port %(lport_uuid)s on "
              "logical router %(lrouter_uuid)s",
              {'lport_uuid': result['uuid'],
               'lrouter_uuid': lrouter_uuid})
    return result
|
||||
|
||||
|
||||
def update_router_lport(cluster, lrouter_uuid, lrouter_port_uuid,
                        tenant_id, neutron_port_id, display_name,
                        admin_status_enabled, ip_addresses):
    """Updates a logical port on the assigned logical router.

    :returns: the updated NSX port body
    """
    lport_obj = dict(
        admin_status_enabled=admin_status_enabled,
        display_name=display_name,
        tags=utils.get_tags(os_tid=tenant_id, q_port_id=neutron_port_id),
        ip_addresses=ip_addresses,
        type="LogicalRouterPortConfig"
    )
    # Do not pass null items to NSX. Iterate over a snapshot of the keys:
    # deleting from a dict while iterating its live key view raises
    # RuntimeError on python 3.
    for key in list(lport_obj.keys()):
        if lport_obj[key] is None:
            del lport_obj[key]
    path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE,
                                  lrouter_port_uuid,
                                  parent_resource_id=lrouter_uuid)
    result = nsxlib.do_request(HTTP_PUT, path,
                               jsonutils.dumps(lport_obj),
                               cluster=cluster)
    LOG.debug("Updated logical port %(lport_uuid)s on "
              "logical router %(lrouter_uuid)s",
              {'lport_uuid': lrouter_port_uuid, 'lrouter_uuid': lrouter_uuid})
    return result
|
||||
|
||||
|
||||
def delete_router_lport(cluster, lrouter_uuid, lport_uuid):
    """Deletes a logical port from the assigned logical router."""
    path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, lport_uuid,
                                  lrouter_uuid)
    nsxlib.do_request(HTTP_DELETE, path, cluster=cluster)
    LOG.debug("Delete logical router port %(lport_uuid)s on "
              "logical router %(lrouter_uuid)s",
              {'lport_uuid': lport_uuid,
               'lrouter_uuid': lrouter_uuid})
|
||||
|
||||
|
||||
def delete_peer_router_lport(cluster, lr_uuid, ls_uuid, lp_uuid):
    """Delete the router port patched to the given logical switch port."""
    nsx_port = switch.get_port(cluster, ls_uuid, lp_uuid,
                               relations="LogicalPortAttachment")
    attachment = (nsx_port.get('_relations') or {}).get(
        'LogicalPortAttachment') or {}
    peer_uuid = attachment.get('peer_port_uuid')
    if peer_uuid:
        delete_router_lport(cluster, lr_uuid, peer_uuid)
|
||||
|
||||
|
||||
def find_router_gw_port(context, cluster, router_id):
    """Retrieves the external gateway port for a NSX logical router."""
    # Find the uuid of nsx ext gw logical router port
    # TODO(salvatore-orlando): Consider storing it in Neutron DB
    lports = query_lrouter_lports(
        cluster, router_id, relations="LogicalPortAttachment")
    for lport in lports:
        attachment = lport.get('_relations', {}).get('LogicalPortAttachment')
        if attachment and attachment.get('type') == 'L3GatewayAttachment':
            return lport
|
||||
|
||||
|
||||
def plug_router_port_attachment(cluster, router_id, port_id,
                                attachment_uuid, nsx_attachment_type,
                                attachment_vlan=None):
    """Attach a router port to the given attachment.

    Current attachment types:
     - PatchAttachment [-> logical switch port uuid]
     - L3GatewayAttachment [-> L3GatewayService uuid]
    For the latter attachment type a VLAN ID can be specified as well.

    :raises nsx_exc.InvalidAttachmentType: for any other attachment type
    """
    uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, port_id, router_id,
                                 is_attachment=True)
    attach_obj = {}
    attach_obj["type"] = nsx_attachment_type
    if nsx_attachment_type == "PatchAttachment":
        attach_obj["peer_port_uuid"] = attachment_uuid
    elif nsx_attachment_type == "L3GatewayAttachment":
        attach_obj["l3_gateway_service_uuid"] = attachment_uuid
        if attachment_vlan:
            attach_obj['vlan_id'] = attachment_vlan
    else:
        raise nsx_exc.InvalidAttachmentType(
            attachment_type=nsx_attachment_type)
    return nsxlib.do_request(
        HTTP_PUT, uri, jsonutils.dumps(attach_obj), cluster=cluster)
|
||||
|
||||
|
||||
def _create_nat_match_obj(**kwargs):
    """Build the 'match' section of a NAT rule from keyword criteria.

    :raises Exception: if any keyword is not one of MATCH_KEYS
    """
    nat_match_obj = {'ethertype': 'IPv4'}
    delta = set(kwargs.keys()) - set(MATCH_KEYS)
    if delta:
        # Interpolate the offending keys into the message; the original code
        # passed them as a second Exception argument, leaving %s unexpanded.
        raise Exception(_("Invalid keys for NAT match: %s") % delta)
    nat_match_obj.update(kwargs)
    return nat_match_obj
|
||||
|
||||
|
||||
def _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj):
    """POST a NAT rule resource under the given logical router."""
    LOG.debug("Creating NAT rule: %s", nat_rule_obj)
    return nsxlib.do_request(
        HTTP_POST,
        nsxlib._build_uri_path(LROUTERNAT_RESOURCE,
                               parent_resource_id=router_id),
        jsonutils.dumps(nat_rule_obj),
        cluster=cluster)
|
||||
|
||||
|
||||
def _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj):
|
||||
return {"to_source_ip_address_min": min_src_ip,
|
||||
"to_source_ip_address_max": max_src_ip,
|
||||
"type": "SourceNatRule",
|
||||
"match": nat_match_obj}
|
||||
|
||||
|
||||
def create_lrouter_nosnat_rule_v2(cluster, _router_id, _match_criteria=None):
    """No-op: NoSourceNatRule is unavailable on NSX 2.x; log and return."""
    LOG.info(_LI("No SNAT rules cannot be applied as they are not available "
                 "in this version of the NSX platform"))
|
||||
|
||||
|
||||
def create_lrouter_nodnat_rule_v2(cluster, _router_id, _match_criteria=None):
    """No-op: NoDestinationNatRule is unavailable on NSX 2.x; log and return."""
    LOG.info(_LI("No DNAT rules cannot be applied as they are not available "
                 "in this version of the NSX platform"))
|
||||
|
||||
|
||||
def create_lrouter_snat_rule_v2(cluster, router_id,
                                min_src_ip, max_src_ip, match_criteria=None):
    """Create a SourceNatRule on a NSX 2.x logical router."""
    match = _create_nat_match_obj(**match_criteria)
    return _create_lrouter_nat_rule(
        cluster, router_id,
        _build_snat_rule_obj(min_src_ip, max_src_ip, match))
|
||||
|
||||
|
||||
def create_lrouter_dnat_rule_v2(cluster, router_id, dst_ip,
                                to_dst_port=None, match_criteria=None):
    """Create a DestinationNatRule on a NSX 2.x logical router."""
    rule = {
        "to_destination_ip_address_min": dst_ip,
        "to_destination_ip_address_max": dst_ip,
        "type": "DestinationNatRule",
        "match": _create_nat_match_obj(**match_criteria),
    }
    if to_dst_port:
        rule['to_destination_port'] = to_dst_port
    return _create_lrouter_nat_rule(cluster, router_id, rule)
|
||||
|
||||
|
||||
def create_lrouter_nosnat_rule_v3(cluster, router_id, order=None,
                                  match_criteria=None):
    """Create a NoSourceNatRule on a NSX 3.x logical router."""
    rule = {"type": "NoSourceNatRule",
            "match": _create_nat_match_obj(**match_criteria)}
    if order:
        rule['order'] = order
    return _create_lrouter_nat_rule(cluster, router_id, rule)
|
||||
|
||||
|
||||
def create_lrouter_nodnat_rule_v3(cluster, router_id, order=None,
                                  match_criteria=None):
    """Create a NoDestinationNatRule on a NSX 3.x logical router."""
    rule = {"type": "NoDestinationNatRule",
            "match": _create_nat_match_obj(**match_criteria)}
    if order:
        rule['order'] = order
    return _create_lrouter_nat_rule(cluster, router_id, rule)
|
||||
|
||||
|
||||
def create_lrouter_snat_rule_v3(cluster, router_id, min_src_ip, max_src_ip,
                                order=None, match_criteria=None):
    """Create a SourceNatRule on a NSX 3.x logical router."""
    rule = _build_snat_rule_obj(min_src_ip, max_src_ip,
                                _create_nat_match_obj(**match_criteria))
    if order:
        rule['order'] = order
    return _create_lrouter_nat_rule(cluster, router_id, rule)
|
||||
|
||||
|
||||
def create_lrouter_dnat_rule_v3(cluster, router_id, dst_ip, to_dst_port=None,
                                order=None, match_criteria=None):
    """Create a DestinationNatRule on a NSX 3.x logical router."""
    rule = {
        "to_destination_ip_address": dst_ip,
        "type": "DestinationNatRule",
        "match": _create_nat_match_obj(**match_criteria),
    }
    if to_dst_port:
        rule['to_destination_port'] = to_dst_port
    if order:
        rule['order'] = order
    return _create_lrouter_nat_rule(cluster, router_id, rule)
|
||||
|
||||
|
||||
def delete_nat_rules_by_match(cluster, router_id, rule_type,
                              max_num_expected,
                              min_num_expected=0,
                              raise_on_len_mismatch=True,
                              **kwargs):
    """Delete the NAT rules of *rule_type* whose match fields equal kwargs.

    :param max_num_expected: upper bound on the number of matching rules
    :param min_num_expected: lower bound on the number of matching rules
    :param raise_on_len_mismatch: raise NatRuleMismatch when the match count
        is outside [min_num_expected, max_num_expected]; otherwise only warn
    :returns: the number of rules deleted - useful at least for testing
    """
    # remove nat rules
    nat_rules = query_nat_rules(cluster, router_id)
    to_delete_ids = []
    for r in nat_rules:
        if (r['type'] != rule_type):
            continue

        # items() instead of iteritems() keeps this python 3 compatible
        for key, value in kwargs.items():
            if not (key in r['match'] and r['match'][key] == value):
                break
        else:
            to_delete_ids.append(r['uuid'])
    num_rules_to_delete = len(to_delete_ids)
    if (num_rules_to_delete < min_num_expected or
        num_rules_to_delete > max_num_expected):
        if raise_on_len_mismatch:
            raise nsx_exc.NatRuleMismatch(actual_rules=num_rules_to_delete,
                                          min_rules=min_num_expected,
                                          max_rules=max_num_expected)
        else:
            LOG.warn(_LW("Found %(actual_rule_num)d matching NAT rules, which "
                         "is not in the expected range (%(min_exp_rule_num)d,"
                         "%(max_exp_rule_num)d)"),
                     {'actual_rule_num': num_rules_to_delete,
                      'min_exp_rule_num': min_num_expected,
                      'max_exp_rule_num': max_num_expected})

    for rule_id in to_delete_ids:
        delete_router_nat_rule(cluster, router_id, rule_id)
    return num_rules_to_delete
|
||||
|
||||
|
||||
def delete_router_nat_rule(cluster, router_id, rule_id):
    """Delete a single NAT rule from the given logical router."""
    nsxlib.do_request(
        HTTP_DELETE,
        nsxlib._build_uri_path(LROUTERNAT_RESOURCE, rule_id, router_id),
        cluster=cluster)
|
||||
|
||||
|
||||
def query_nat_rules(cluster, router_id, fields="*", filters=None):
    """Return all NAT rules of a logical router, across result pages."""
    return nsxlib.get_all_query_pages(
        nsxlib._build_uri_path(LROUTERNAT_RESOURCE,
                               parent_resource_id=router_id,
                               fields=fields, filters=filters),
        cluster)
|
||||
|
||||
|
||||
# NOTE(salvatore-orlando): The following FIXME applies in general to
|
||||
# each operation on list attributes.
|
||||
# FIXME(salvatore-orlando): need a lock around the list of IPs on an iface
|
||||
# NOTE(salvatore-orlando): The following FIXME applies in general to
# each operation on list attributes.
# FIXME(salvatore-orlando): need a lock around the list of IPs on an iface
def update_lrouter_port_ips(cluster, lrouter_id, lport_id,
                            ips_to_add, ips_to_remove):
    """Add/remove IP addresses on a logical router port (read-modify-write).

    :raises nsx_exc.NsxPluginException: if the port is not found or the
        NSX API call fails
    """
    uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, lport_id, lrouter_id)
    try:
        port = nsxlib.do_request(HTTP_GET, uri, cluster=cluster)
        # TODO(salvatore-orlando): Enforce ips_to_add intersection with
        # ips_to_remove is empty
        ip_address_set = set(port['ip_addresses'])
        ip_address_set = ip_address_set - set(ips_to_remove)
        ip_address_set = ip_address_set | set(ips_to_add)
        # Set is not JSON serializable - convert to list
        port['ip_addresses'] = list(ip_address_set)
        nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(port),
                          cluster=cluster)
    except exception.NotFound:
        # FIXME(salv-orlando):avoid raising different exception
        data = {'lport_id': lport_id, 'lrouter_id': lrouter_id}
        msg = (_("Router Port %(lport_id)s not found on router "
                 "%(lrouter_id)s") % data)
        LOG.exception(msg)
        raise nsx_exc.NsxPluginException(err_msg=msg)
    except api_exc.NsxApiException as e:
        msg = _("An exception occurred while updating IP addresses on a "
                "router logical port:%s") % e
        LOG.exception(msg)
        raise nsx_exc.NsxPluginException(err_msg=msg)
|
||||
|
||||
|
||||
# Dispatch table consumed by @versioning.versioned: operation name ->
# NSX major version -> {minor version or DEFAULT_VERSION -> implementation}.
ROUTER_FUNC_DICT = {
    'create_lrouter': {
        2: {versioning.DEFAULT_VERSION: create_implicit_routing_lrouter, },
        3: {versioning.DEFAULT_VERSION: create_implicit_routing_lrouter,
            1: create_implicit_routing_lrouter_with_distribution,
            2: create_explicit_routing_lrouter, }, },
    'update_lrouter': {
        2: {versioning.DEFAULT_VERSION: update_implicit_routing_lrouter, },
        3: {versioning.DEFAULT_VERSION: update_implicit_routing_lrouter,
            2: update_explicit_routing_lrouter, }, },
    'create_lrouter_dnat_rule': {
        2: {versioning.DEFAULT_VERSION: create_lrouter_dnat_rule_v2, },
        3: {versioning.DEFAULT_VERSION: create_lrouter_dnat_rule_v3, }, },
    'create_lrouter_snat_rule': {
        2: {versioning.DEFAULT_VERSION: create_lrouter_snat_rule_v2, },
        3: {versioning.DEFAULT_VERSION: create_lrouter_snat_rule_v3, }, },
    'create_lrouter_nosnat_rule': {
        2: {versioning.DEFAULT_VERSION: create_lrouter_nosnat_rule_v2, },
        3: {versioning.DEFAULT_VERSION: create_lrouter_nosnat_rule_v3, }, },
    'create_lrouter_nodnat_rule': {
        2: {versioning.DEFAULT_VERSION: create_lrouter_nodnat_rule_v2, },
        3: {versioning.DEFAULT_VERSION: create_lrouter_nodnat_rule_v3, }, },
    'get_default_route_explicit_routing_lrouter': {
        # Map the 3.x default to the v33 implementation so the efficient
        # server-side prefix filter is used on NSX >= 3.3; only 3.2 needs
        # the full-RIB scan. Previously both entries pointed at _v32,
        # leaving _v33 dead code.
        3: {versioning.DEFAULT_VERSION:
            get_default_route_explicit_routing_lrouter_v33,
            2: get_default_route_explicit_routing_lrouter_v32, }, },
}
|
||||
|
||||
|
||||
@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter(cluster, *args, **kwargs):
    """Create a logical router; dispatched by NSX version.

    The body below is only a pre-dispatch guard: distributed routers
    require NSX >= 3.1.
    """
    if kwargs.get('distributed', None):
        v = cluster.api_client.get_version()
        if (v.major, v.minor) < (3, 1):
            raise nsx_exc.InvalidVersion(version=v)
        return v
|
||||
|
||||
|
||||
@versioning.versioned(ROUTER_FUNC_DICT)
def get_default_route_explicit_routing_lrouter(cluster, *args, **kwargs):
    # Stub: the decorator dispatches to the version-specific implementation.
    pass
|
||||
|
||||
|
||||
@versioning.versioned(ROUTER_FUNC_DICT)
def update_lrouter(cluster, *args, **kwargs):
    """Update a logical router; dispatched by NSX version.

    The body below is only a pre-dispatch guard: explicit routes
    require NSX >= 3.2.
    """
    if kwargs.get('routes', None):
        v = cluster.api_client.get_version()
        if (v.major, v.minor) < (3, 2):
            raise nsx_exc.InvalidVersion(version=v)
        return v
|
||||
|
||||
|
||||
@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter_dnat_rule(cluster, *args, **kwargs):
    # Stub: the decorator dispatches to the version-specific implementation.
    pass
|
||||
|
||||
|
||||
@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter_snat_rule(cluster, *args, **kwargs):
    # Stub: the decorator dispatches to the version-specific implementation.
    pass
|
||||
|
||||
|
||||
@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter_nosnat_rule(cluster, *args, **kwargs):
    # Stub: the decorator dispatches to the version-specific implementation.
    pass
|
||||
|
||||
|
||||
@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter_nodnat_rule(cluster, *args, **kwargs):
    # Stub: the decorator dispatches to the version-specific implementation.
    pass
|
@ -1,143 +0,0 @@
|
||||
# Copyright 2014 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import excutils
|
||||
|
||||
from neutron.common import constants
|
||||
from neutron.common import exceptions
|
||||
from neutron.i18n import _LW
|
||||
from neutron.openstack.common import log
|
||||
from neutron.plugins.vmware.common import utils
|
||||
from neutron.plugins.vmware import nsxlib
|
||||
|
||||
# HTTP verbs used against the NSX API
HTTP_GET = "GET"
HTTP_POST = "POST"
HTTP_DELETE = "DELETE"
HTTP_PUT = "PUT"

# URI fragment for the security profile resource
SECPROF_RESOURCE = "security-profile"

LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
def mk_body(**kwargs):
    """Convenience function creates and dumps dictionary to string.

    :param kwargs: the key/value pairs to be dumped into a json string.
    :returns: a json string.
    """
    return jsonutils.dumps(kwargs, ensure_ascii=False)
|
||||
|
||||
|
||||
def query_security_profiles(cluster, fields=None, filters=None):
    """Return all NSX security profiles matching the given filters."""
    uri = nsxlib._build_uri_path(SECPROF_RESOURCE,
                                 fields=fields,
                                 filters=filters)
    return nsxlib.get_all_query_pages(uri, cluster)
|
||||
|
||||
|
||||
def create_security_profile(cluster, tenant_id, neutron_id, security_profile):
    """Create a security profile on the NSX backend.

    :param cluster: a NSX cluster object reference
    :param tenant_id: identifier of the Neutron tenant
    :param neutron_id: neutron security group identifier
    :param security_profile: dictionary with data for
    configuring the NSX security profile.
    :returns: the NSX security profile body, including its 'uuid'
    """
    path = "/ws.v1/security-profile"
    # Allow all dhcp responses and all ingress traffic
    hidden_rules = {'logical_port_egress_rules':
                    [{'ethertype': 'IPv4',
                      'protocol': constants.PROTO_NUM_UDP,
                      'port_range_min': constants.DHCP_RESPONSE_PORT,
                      'port_range_max': constants.DHCP_RESPONSE_PORT,
                      'ip_prefix': '0.0.0.0/0'}],
                    'logical_port_ingress_rules':
                    [{'ethertype': 'IPv4'},
                     {'ethertype': 'IPv6'}]}
    display_name = utils.check_and_truncate(security_profile.get('name'))
    # NOTE(salv-orlando): neutron-id tags are prepended with 'q' for
    # historical reasons
    body = mk_body(
        tags=utils.get_tags(os_tid=tenant_id, q_sec_group_id=neutron_id),
        display_name=display_name,
        logical_port_ingress_rules=(
            hidden_rules['logical_port_ingress_rules']),
        logical_port_egress_rules=hidden_rules['logical_port_egress_rules']
    )
    rsp = nsxlib.do_request(HTTP_POST, path, body, cluster=cluster)
    if security_profile.get('name') == 'default':
        # If security group is default allow ip traffic between
        # members of the same security profile is allowed and ingress traffic
        # from the switch
        rules = {'logical_port_egress_rules': [{'ethertype': 'IPv4',
                                                'profile_uuid': rsp['uuid']},
                                               {'ethertype': 'IPv6',
                                                'profile_uuid': rsp['uuid']}],
                 'logical_port_ingress_rules': [{'ethertype': 'IPv4'},
                                                {'ethertype': 'IPv6'}]}

        update_security_group_rules(cluster, rsp['uuid'], rules)
    LOG.debug("Created Security Profile: %s", rsp)
    return rsp
|
||||
|
||||
|
||||
def update_security_group_rules(cluster, spid, rules):
    """Replace the rule set of NSX security profile *spid*.

    Mutates *rules* in place: a DHCP-response egress rule is always
    appended, and a drop-all placeholder ingress rule is added when the
    ingress list is empty.
    """
    profile_path = "/ws.v1/security-profile/%s" % spid

    # Always allow DHCP responses back in.
    dhcp_rule = {'ethertype': 'IPv4',
                 'protocol': constants.PROTO_NUM_UDP,
                 'port_range_min': constants.DHCP_RESPONSE_PORT,
                 'port_range_max': constants.DHCP_RESPONSE_PORT,
                 'ip_prefix': '0.0.0.0/0'}
    rules['logical_port_egress_rules'].append(dhcp_rule)
    # An empty ingress list needs a placeholder ("bunk") rule so that
    # all ingress traffic is effectively dropped.
    if not rules['logical_port_ingress_rules']:
        rules['logical_port_ingress_rules'].append(
            {'ethertype': 'IPv4', 'ip_prefix': '127.0.0.1/32'})
    try:
        request_body = mk_body(
            logical_port_ingress_rules=rules['logical_port_ingress_rules'],
            logical_port_egress_rules=rules['logical_port_egress_rules'])
        response = nsxlib.do_request(HTTP_PUT, profile_path, request_body,
                                     cluster=cluster)
    except exceptions.NotFound as e:
        LOG.error(nsxlib.format_exception("Unknown", e, locals()))
        # FIXME(salvatore-orlando): This should not raise NeutronException
        raise exceptions.NeutronException()
    LOG.debug("Updated Security Profile: %s", response)
    return response
|
||||
|
||||
|
||||
def update_security_profile(cluster, spid, name):
    """Rename security profile *spid* (updates display_name only)."""
    uri = nsxlib._build_uri_path(SECPROF_RESOURCE, resource_id=spid)
    body = jsonutils.dumps({"display_name": utils.check_and_truncate(name)})
    return nsxlib.do_request(HTTP_PUT, uri, body, cluster=cluster)
|
||||
|
||||
|
||||
def delete_security_profile(cluster, spid):
    """Delete security profile *spid*; log and re-raise NotFound."""
    try:
        nsxlib.do_request(HTTP_DELETE,
                          "/ws.v1/security-profile/%s" % spid,
                          cluster=cluster)
    except exceptions.NotFound:
        with excutils.save_and_reraise_exception():
            # This is not necessarily an error condition
            LOG.warn(_LW("Unable to find security profile %s on NSX backend"),
                     spid)
|
@ -1,398 +0,0 @@
|
||||
# Copyright 2014 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from neutron.common import constants
|
||||
from neutron.common import exceptions as exception
|
||||
from neutron.i18n import _LE, _LI, _LW
|
||||
from neutron.openstack.common import log
|
||||
from neutron.plugins.vmware.api_client import exception as api_exc
|
||||
from neutron.plugins.vmware.common import exceptions as nsx_exc
|
||||
from neutron.plugins.vmware.common import utils
|
||||
from neutron.plugins.vmware import nsxlib
|
||||
|
||||
# HTTP verbs used for requests against the NSX API.
HTTP_GET = "GET"
HTTP_POST = "POST"
HTTP_DELETE = "DELETE"
HTTP_PUT = "PUT"

# NSX API resource names; LSWITCHPORT_RESOURCE expands to
# "lport/lswitch" (logical ports nested under logical switches).
LSWITCH_RESOURCE = "lswitch"
LSWITCHPORT_RESOURCE = "lport/%s" % LSWITCH_RESOURCE

# Module-level logger.
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
def _configure_extensions(lport_obj, mac_address, fixed_ips,
|
||||
port_security_enabled, security_profiles,
|
||||
queue_id, mac_learning_enabled,
|
||||
allowed_address_pairs):
|
||||
lport_obj['allowed_address_pairs'] = []
|
||||
if port_security_enabled:
|
||||
for fixed_ip in fixed_ips:
|
||||
ip_address = fixed_ip.get('ip_address')
|
||||
if ip_address:
|
||||
lport_obj['allowed_address_pairs'].append(
|
||||
{'mac_address': mac_address, 'ip_address': ip_address})
|
||||
# add address pair allowing src_ip 0.0.0.0 to leave
|
||||
# this is required for outgoing dhcp request
|
||||
lport_obj["allowed_address_pairs"].append(
|
||||
{"mac_address": mac_address,
|
||||
"ip_address": "0.0.0.0"})
|
||||
lport_obj['security_profiles'] = list(security_profiles or [])
|
||||
lport_obj['queue_uuid'] = queue_id
|
||||
if mac_learning_enabled is not None:
|
||||
lport_obj["mac_learning"] = mac_learning_enabled
|
||||
lport_obj["type"] = "LogicalSwitchPortConfig"
|
||||
for address_pair in list(allowed_address_pairs or []):
|
||||
lport_obj['allowed_address_pairs'].append(
|
||||
{'mac_address': address_pair['mac_address'],
|
||||
'ip_address': address_pair['ip_address']})
|
||||
|
||||
|
||||
def get_lswitch_by_id(cluster, lswitch_id):
    """Return the logical switch *lswitch_id* with its status relation.

    Raises NetworkNotFound when the backend does not know the switch.
    """
    try:
        lswitch_uri_path = nsxlib._build_uri_path(
            LSWITCH_RESOURCE, lswitch_id,
            relations="LogicalSwitchStatus")
        return nsxlib.do_request(HTTP_GET, lswitch_uri_path, cluster=cluster)
    except exception.NotFound:
        # FIXME(salv-orlando): this should not raise a neutron exception
        raise exception.NetworkNotFound(net_id=lswitch_id)
|
||||
|
||||
|
||||
def get_lswitches(cluster, neutron_net_id):
    """Return all logical switches backing neutron network *neutron_net_id*.

    First tries a direct lookup by uuid; if the switch carries the
    'multi_lswitch' tag (or the direct lookup misses, which is legit for
    networks created by post-Havana plugins), extra switches are found by
    the 'quantum_net_id' tag.  Raises NetworkNotFound when nothing matches.
    """

    def lookup_switches_by_tag():
        # Fetch extra logical switches
        lswitch_query_path = nsxlib._build_uri_path(
            LSWITCH_RESOURCE,
            fields="uuid,display_name,tags,lport_count",
            relations="LogicalSwitchStatus",
            filters={'tag': neutron_net_id,
                     'tag_scope': 'quantum_net_id'})
        return nsxlib.get_all_query_pages(lswitch_query_path, cluster)

    lswitch_uri_path = nsxlib._build_uri_path(LSWITCH_RESOURCE, neutron_net_id,
                                              relations="LogicalSwitchStatus")
    results = []
    try:
        ls = nsxlib.do_request(HTTP_GET, lswitch_uri_path, cluster=cluster)
        results.append(ls)
        for tag in ls['tags']:
            if (tag['scope'] == "multi_lswitch" and
                tag['tag'] == "True"):
                results.extend(lookup_switches_by_tag())
    except exception.NotFound:
        # This is legit if the neutron network was created using
        # a post-Havana version of the plugin
        results.extend(lookup_switches_by_tag())
    if results:
        return results
    else:
        raise exception.NetworkNotFound(net_id=neutron_net_id)
|
||||
|
||||
|
||||
def create_lswitch(cluster, neutron_net_id, tenant_id, display_name,
                   transport_zones_config,
                   shared=None,
                   **kwargs):
    """Create a logical switch on the NSX backend and return it."""
    # The tag scope adopts a slightly different naming convention for
    # historical reasons ('quantum_net_id' rather than a neutron name).
    tags = utils.get_tags(os_tid=tenant_id,
                          quantum_net_id=neutron_net_id)
    switch_spec = {"display_name": utils.check_and_truncate(display_name),
                   "transport_zones": transport_zones_config,
                   "replication_mode": cfg.CONF.NSX.replication_mode,
                   "tags": tags}
    # TODO(salv-orlando): Now that we have async status synchronization
    # this tag is perhaps not needed anymore
    if shared:
        tags.append({"tag": "true", "scope": "shared"})
    if "tags" in kwargs:
        tags.extend(kwargs["tags"])
    result = nsxlib.do_request(HTTP_POST,
                               nsxlib._build_uri_path(LSWITCH_RESOURCE),
                               jsonutils.dumps(switch_spec),
                               cluster=cluster)
    LOG.debug("Created logical switch: %s", result['uuid'])
    return result
|
||||
|
||||
|
||||
def update_lswitch(cluster, lswitch_id, display_name,
                   tenant_id=None, **kwargs):
    """Update display name (and optionally tags) of a logical switch.

    NOTE: a tag update replaces existing backend tags, it does not merge.
    Raises NetworkNotFound if the switch does not exist.
    """
    uri = nsxlib._build_uri_path(LSWITCH_RESOURCE, resource_id=lswitch_id)
    payload = {"display_name": utils.check_and_truncate(display_name)}
    tags = utils.get_tags(os_tid=tenant_id) if tenant_id else []
    # The 'tags' kwarg might be present and still be None.
    tags.extend(kwargs.get('tags') or [])
    if tags:
        payload['tags'] = tags
    try:
        return nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(payload),
                                 cluster=cluster)
    except exception.NotFound:
        LOG.exception(_LE("Network not found."))
        raise exception.NetworkNotFound(net_id=lswitch_id)
|
||||
|
||||
|
||||
def delete_network(cluster, net_id, lswitch_id):
    """Delete the single logical switch *lswitch_id* backing *net_id*."""
    delete_networks(cluster, net_id, [lswitch_id])
|
||||
|
||||
|
||||
#TODO(salvatore-orlando): Simplify and harmonize
def delete_networks(cluster, net_id, lswitch_ids):
    """Delete each logical switch in *lswitch_ids* from the backend."""
    for switch_id in lswitch_ids:
        try:
            nsxlib.do_request(HTTP_DELETE,
                              "/ws.v1/lswitch/%s" % switch_id,
                              cluster=cluster)
        except exception.NotFound:
            LOG.exception(_LE("Network not found."))
            raise exception.NetworkNotFound(net_id=switch_id)
|
||||
|
||||
|
||||
def query_lswitch_lports(cluster, ls_uuid, fields="*",
                         filters=None, relations=None):
    """List logical ports on switch *ls_uuid*, honouring query filters."""
    # The public 'attachment' filter maps to 'attachment_vif_uuid'
    # on the NSX API.
    if filters and "attachment" in filters:
        filters['attachment_vif_uuid'] = filters.pop('attachment')
    uri = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE,
                                 parent_resource_id=ls_uuid,
                                 fields=fields,
                                 filters=filters,
                                 relations=relations)
    return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)['results']
|
||||
|
||||
|
||||
def delete_port(cluster, switch, port):
    """Delete logical port *port* from logical switch *switch*.

    Raises PortNotFoundOnNetwork when either object is missing, and
    NeutronException on other NSX API failures.
    """
    uri = "/ws.v1/lswitch/%s/lport/%s" % (switch, port)
    try:
        nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster)
    except exception.NotFound:
        LOG.exception(_LE("Port or Network not found"))
        raise exception.PortNotFoundOnNetwork(
            net_id=switch, port_id=port)
    except api_exc.NsxApiException:
        raise exception.NeutronException()
|
||||
|
||||
|
||||
def get_ports(cluster, networks=None, devices=None, tenants=None):
    """Return NSX logical ports keyed by their neutron q_port_id tag.

    Optional filters: *networks* (only the first id is used — see FIXME),
    *devices* (matched against the vm_id tag, both obsolete-obfuscated and
    current forms), and *tenants* (matched against the os_tid tag).
    Raises NsxPluginException on unexpected failures.
    """
    vm_filter_obsolete = ""
    vm_filter = ""
    tenant_filter = ""
    # This is used when calling delete_network. Neutron checks to see if
    # the network has any ports.
    if networks:
        # FIXME (Aaron) If we get more than one network_id this won't work
        lswitch = networks[0]
    else:
        lswitch = "*"
    if devices:
        # Build '&'-joined tag filter fragments; each iteration prepends
        # another tag_scope/tag pair to the running filter string.
        for device_id in devices:
            vm_filter_obsolete = '&'.join(
                ["tag_scope=vm_id",
                 "tag=%s" % utils.device_id_to_vm_id(device_id,
                                                     obfuscate=True),
                 vm_filter_obsolete])
            vm_filter = '&'.join(
                ["tag_scope=vm_id",
                 "tag=%s" % utils.device_id_to_vm_id(device_id),
                 vm_filter])
    if tenants:
        for tenant in tenants:
            tenant_filter = '&'.join(
                ["tag_scope=os_tid",
                 "tag=%s" % tenant,
                 tenant_filter])

    nsx_lports = {}
    lport_fields_str = ("tags,admin_status_enabled,display_name,"
                        "fabric_status_up")
    try:
        lport_query_path_obsolete = (
            "/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
            "&relations=LogicalPortStatus" %
            (lswitch, lport_fields_str, vm_filter_obsolete, tenant_filter))
        lport_query_path = (
            "/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
            "&relations=LogicalPortStatus" %
            (lswitch, lport_fields_str, vm_filter, tenant_filter))
        try:
            # NOTE(armando-migliaccio): by querying with obsolete tag first
            # current deployments won't take the performance hit of a double
            # call. In release L-** or M-**, we might want to swap the calls
            # as it's likely that ports with the new tag would outnumber the
            # ones with the old tag
            ports = nsxlib.get_all_query_pages(lport_query_path_obsolete,
                                               cluster)
            if not ports:
                ports = nsxlib.get_all_query_pages(lport_query_path, cluster)
        except exception.NotFound:
            # A missing lswitch is tolerated here: an empty mapping is
            # returned instead of propagating the error.
            LOG.warn(_LW("Lswitch %s not found in NSX"), lswitch)
            ports = None

        if ports:
            # Index each port by the neutron port id stored in its
            # q_port_id tag.
            for port in ports:
                for tag in port["tags"]:
                    if tag["scope"] == "q_port_id":
                        nsx_lports[tag["tag"]] = port
    except Exception:
        err_msg = _("Unable to get ports")
        LOG.exception(err_msg)
        raise nsx_exc.NsxPluginException(err_msg=err_msg)
    return nsx_lports
|
||||
|
||||
|
||||
def get_port_by_neutron_tag(cluster, lswitch_uuid, neutron_port_id):
    """Get port by neutron tag.

    Returns the NSX UUID of the logical port with tag q_port_id equal to
    neutron_port_id or None if the port is not Found.
    """
    uri = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE,
                                 parent_resource_id=lswitch_uuid,
                                 fields='uuid',
                                 filters={'tag': neutron_port_id,
                                          'tag_scope': 'q_port_id'})
    LOG.debug("Looking for port with q_port_id tag '%(neutron_port_id)s' "
              "on: '%(lswitch_uuid)s'",
              {'neutron_port_id': neutron_port_id,
               'lswitch_uuid': lswitch_uuid})
    matches = nsxlib.do_request(HTTP_GET, uri, cluster=cluster)["results"]
    if not matches:
        return None
    if len(matches) > 1:
        # More than one match indicates stale/duplicated tags; the first
        # result is still returned.
        LOG.warn(_LW("Found '%(num_ports)d' ports with "
                     "q_port_id tag: '%(neutron_port_id)s'. "
                     "Only 1 was expected."),
                 {'num_ports': len(matches),
                  'neutron_port_id': neutron_port_id})
    return matches[0]
|
||||
|
||||
|
||||
def get_port(cluster, network, port, relations=None):
    """Fetch logical port *port* on switch *network*.

    *relations* optionally requests extra related objects from the API.
    Raises PortNotFoundOnNetwork when the port or switch is missing.
    """
    LOG.info(_LI("get_port() %(network)s %(port)s"),
             {'network': network, 'port': port})
    uri = "/ws.v1/lswitch/%s/lport/%s?" % (network, port)
    if relations:
        uri += "relations=%s" % relations
    try:
        return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)
    except exception.NotFound:
        LOG.exception(_LE("Port or Network not found."))
        raise exception.PortNotFoundOnNetwork(
            port_id=port, net_id=network)
|
||||
|
||||
|
||||
def update_port(cluster, lswitch_uuid, lport_uuid, neutron_port_id, tenant_id,
                display_name, device_id, admin_status_enabled,
                mac_address=None, fixed_ips=None, port_security_enabled=None,
                security_profiles=None, queue_id=None,
                mac_learning_enabled=None, allowed_address_pairs=None):
    """Update logical port *lport_uuid* on switch *lswitch_uuid*.

    The whole port body is rebuilt (tags, extension attributes) and PUT
    to the backend.  Returns the backend response dict; raises
    PortNotFoundOnNetwork if the port or switch is missing.
    """
    lport_obj = dict(
        admin_status_enabled=admin_status_enabled,
        display_name=utils.check_and_truncate(display_name),
        tags=utils.get_tags(os_tid=tenant_id,
                            q_port_id=neutron_port_id,
                            vm_id=utils.device_id_to_vm_id(device_id)))

    # Fill allowed address pairs, security profiles, queue, MAC learning
    # and port type on the request body.
    _configure_extensions(lport_obj, mac_address, fixed_ips,
                          port_security_enabled, security_profiles,
                          queue_id, mac_learning_enabled,
                          allowed_address_pairs)

    path = "/ws.v1/lswitch/" + lswitch_uuid + "/lport/" + lport_uuid
    try:
        result = nsxlib.do_request(HTTP_PUT, path, jsonutils.dumps(lport_obj),
                                   cluster=cluster)
        LOG.debug("Updated logical port %(result)s "
                  "on logical switch %(uuid)s",
                  {'result': result['uuid'], 'uuid': lswitch_uuid})
        return result
    except exception.NotFound:
        LOG.exception(_LE("Port or Network not found."))
        raise exception.PortNotFoundOnNetwork(
            port_id=lport_uuid, net_id=lswitch_uuid)
|
||||
|
||||
|
||||
def create_lport(cluster, lswitch_uuid, tenant_id, neutron_port_id,
                 display_name, device_id, admin_status_enabled,
                 mac_address=None, fixed_ips=None, port_security_enabled=None,
                 security_profiles=None, queue_id=None,
                 mac_learning_enabled=None, allowed_address_pairs=None):
    """Creates a logical port on the assigned logical switch.

    The port is tagged with the tenant, neutron port id and vm id so it
    can later be found by tag queries; extension attributes (address
    pairs, security profiles, queue, MAC learning) are filled via
    _configure_extensions.  Returns the backend response dict.
    """
    display_name = utils.check_and_truncate(display_name)
    lport_obj = dict(
        admin_status_enabled=admin_status_enabled,
        display_name=display_name,
        tags=utils.get_tags(os_tid=tenant_id,
                            q_port_id=neutron_port_id,
                            vm_id=utils.device_id_to_vm_id(device_id))
    )

    _configure_extensions(lport_obj, mac_address, fixed_ips,
                          port_security_enabled, security_profiles,
                          queue_id, mac_learning_enabled,
                          allowed_address_pairs)

    path = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE,
                                  parent_resource_id=lswitch_uuid)
    result = nsxlib.do_request(HTTP_POST, path, jsonutils.dumps(lport_obj),
                               cluster=cluster)

    LOG.debug("Created logical port %(result)s on logical switch %(uuid)s",
              {'result': result['uuid'], 'uuid': lswitch_uuid})
    return result
|
||||
|
||||
|
||||
def get_port_status(cluster, lswitch_id, port_id):
    """Retrieve the operational status of the port.

    Maps the backend 'link_status_up' flag to neutron port status
    constants; raises PortNotFoundOnNetwork when the port is missing.
    """
    try:
        status = nsxlib.do_request(HTTP_GET,
                                   "/ws.v1/lswitch/%s/lport/%s/status" %
                                   (lswitch_id, port_id), cluster=cluster)
    except exception.NotFound:
        LOG.exception(_LE("Port not found."))
        raise exception.PortNotFoundOnNetwork(
            port_id=port_id, net_id=lswitch_id)
    if status['link_status_up'] is True:
        return constants.PORT_STATUS_ACTIVE
    return constants.PORT_STATUS_DOWN
|
||||
|
||||
|
||||
def plug_interface(cluster, lswitch_id, lport_id, att_obj):
    """PUT attachment object *att_obj* onto a logical port."""
    uri = nsxlib._build_uri_path(LSWITCHPORT_RESOURCE,
                                 lport_id, lswitch_id,
                                 is_attachment=True)
    return nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(att_obj),
                             cluster=cluster)
|
||||
|
||||
|
||||
def plug_vif_interface(
    cluster, lswitch_id, port_id, port_type, attachment=None):
    """Plug a VIF Attachment object in a logical port."""
    att_obj = {}
    if attachment:
        att_obj["vif_uuid"] = attachment
    att_obj["type"] = port_type
    return plug_interface(cluster, lswitch_id, port_id, att_obj)
|
@ -1,66 +0,0 @@
|
||||
# Copyright 2014 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import inspect
|
||||
|
||||
from neutron.plugins.vmware.api_client import exception
|
||||
|
||||
DEFAULT_VERSION = -1
|
||||
|
||||
|
||||
def versioned(func_table):
    """Decorator factory dispatching to a version-specific implementation.

    *func_table* maps function name -> {major: {minor: callable}}.  The
    decorated (wrapper) function is invoked first — it may perform
    version validation and may itself return an NSX version; otherwise
    the version is fetched from the cluster's API client.  The matching
    implementation is then looked up and called.
    """

    def versioned_function(wrapped_func):
        func_name = wrapped_func.__name__

        def dispatch_versioned_function(cluster, *args, **kwargs):
            # Call the wrapper function, in case we need to
            # run validation checks regarding versions. It
            # should return the NSX version
            v = (wrapped_func(cluster, *args, **kwargs) or
                 cluster.api_client.get_version())
            func = get_function_by_version(func_table, func_name, v)
            func_kwargs = kwargs
            arg_spec = inspect.getargspec(func)
            if not arg_spec.keywords and not arg_spec.varargs:
                # drop args unknown to function from func_args
                arg_set = set(func_kwargs.keys())
                for arg in arg_set - set(arg_spec.args):
                    del func_kwargs[arg]
            # NOTE(salvatore-orlando): shall we fail here if a required
            # argument is not passed, or let the called function raise?
            return func(cluster, *args, **func_kwargs)

        return dispatch_versioned_function
    return versioned_function
|
||||
|
||||
|
||||
def get_function_by_version(func_table, func_name, ver):
    """Pick the implementation of *func_name* matching NSX version *ver*.

    Falls back to the highest known major (raising NotImplementedError
    if that is newer than *ver*) and to DEFAULT_VERSION when the minor
    is unknown; raises ServiceUnavailable when *ver* is unset.
    """
    if not ver:
        msg = _('NSX version is not set. Unable to complete request '
                'correctly. Check log for NSX communication errors.')
        raise exception.ServiceUnavailable(message=msg)
    by_major = func_table[func_name]
    if ver.major in by_major:
        major = ver.major
        minor = ver.minor
        if minor not in by_major[major]:
            minor = DEFAULT_VERSION
    else:
        major = max(by_major.keys())
        minor = max(by_major[major].keys())
        if major > ver.major:
            raise NotImplementedError(_("Operation may not be supported"))
    return by_major[major][minor]
|
@ -15,6 +15,9 @@
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from neutron.plugins.vmware.plugins import base
|
||||
from vmware_nsx.neutron.plugins.vmware.plugins import base as nsx_mh
|
||||
|
||||
NsxPlugin = base.NsxPluginV2
|
||||
NsxMhPlugin = nsx_mh.NsxPluginV2
|
||||
# The 'NsxPlugin' name will be deprecated in Liberty
|
||||
# and replaced by the 'NsxMhPlugin' name
|
||||
NsxPlugin = NsxMhPlugin
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -1,41 +0,0 @@
|
||||
# Copyright 2014 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import sys
|
||||
|
||||
from neutron.plugins.vmware.shell import commands as cmd
|
||||
from neutronclient import shell
|
||||
|
||||
|
||||
class NsxManage(shell.NeutronShell):
    """neutronclient-based shell exposing NSX management commands."""

    def __init__(self, api_version):
        super(NsxManage, self).__init__(api_version)
        # Register the NSX-specific network migration/report commands.
        self.command_manager.add_command('net-migrate', cmd.NetworkMigrate)
        self.command_manager.add_command('net-report', cmd.NetworkReport)

    def build_option_parser(self, description, version):
        # No extra options beyond the base neutron shell parser.
        parser = super(NsxManage, self).build_option_parser(
            description, version)
        return parser

    def initialize_app(self, argv):
        super(NsxManage, self).initialize_app(argv)
        # Cache the neutron client for use by command implementations.
        self.client = self.client_manager.neutron
|
||||
|
||||
|
||||
def main():
    """Entry point for the NSX management shell."""
    app = NsxManage(shell.NEUTRON_API_VERSION)
    return app.run(sys.argv[1:])
|
@ -1,67 +0,0 @@
|
||||
# Copyright 2014 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from neutronclient.neutron import v2_0 as client
|
||||
|
||||
LSN_PATH = '/lsns'
|
||||
|
||||
|
||||
def print_report(write_func, report):
    """Write a human-readable LSN migration report via *write_func*."""
    data = report['report']
    write_func(_("\nService type = %s\n") % data['type'])
    write_func(_("Service uuids = %s\n") % ','.join(data['services']))
    write_func(_("Port uuids = %s\n\n") % ','.join(data['ports']))
|
||||
|
||||
|
||||
class NetworkReport(client.NeutronCommand):
    """Retrieve network migration report."""

    def get_parser(self, prog_name):
        parser = super(NetworkReport, self).get_parser(prog_name)
        parser.add_argument('network', metavar='network',
                            help=_('ID or name of network to run report on'))
        return parser

    def run(self, parsed_args):
        # Resolve the CLI-supplied name or id to a network uuid, then
        # fetch and print the LSN report for it.
        net = parsed_args.network
        net_id = client.find_resourceid_by_name_or_id(self.app.client,
                                                      'network', net)
        res = self.app.client.get("%s/%s" % (LSN_PATH, net_id))
        if res:
            self.app.stdout.write(_('Migration report is:\n'))
            print_report(self.app.stdout.write, res['lsn'])
|
||||
|
||||
|
||||
class NetworkMigrate(client.NeutronCommand):
    """Perform network migration."""

    def get_parser(self, prog_name):
        parser = super(NetworkMigrate, self).get_parser(prog_name)
        parser.add_argument('network', metavar='network',
                            help=_('ID or name of network to migrate'))
        return parser

    def run(self, parsed_args):
        # Resolve the CLI-supplied name or id, POST the migration
        # request, and print the resulting report on success.
        net = parsed_args.network
        net_id = client.find_resourceid_by_name_or_id(self.app.client,
                                                      'network', net)
        body = {'network': net_id}
        res = self.app.client.post(LSN_PATH, body={'lsn': body})
        if res:
            self.app.stdout.write(_('Migration has been successful:\n'))
            print_report(self.app.stdout.write, res['lsn'])
|
@ -1,80 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import base64
|
||||
|
||||
import eventlet
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from neutron.plugins.vmware.vshield.common import exceptions
|
||||
|
||||
httplib2 = eventlet.import_patched('httplib2')
|
||||
|
||||
|
||||
def xmldumps(obj):
    """Serialize *obj* (nested dicts/lists/scalars) to an XML fragment.

    Dict keys become element tags wrapping their serialized values,
    list items are concatenated, and any other object is returned
    unchanged.
    """
    config = ""
    if isinstance(obj, dict):
        # Use items() rather than the Python 2-only iteritems() so this
        # helper also runs under Python 3; behavior is identical on 2.x.
        for key, value in obj.items():
            cfg = "<%s>%s</%s>" % (key, xmldumps(value), key)
            config += cfg
    elif isinstance(obj, list):
        for value in obj:
            config += xmldumps(value)
    else:
        config = obj

    return config
|
||||
|
||||
|
||||
class VcnsApiHelper(object):
    """Minimal HTTP helper for the vShield/vCNS management API."""

    # Map HTTP error status codes to the exception raised for them.
    errors = {
        303: exceptions.ResourceRedirect,
        400: exceptions.RequestBad,
        403: exceptions.Forbidden,
        404: exceptions.ResourceNotFound,
        415: exceptions.MediaTypeUnsupport,
        503: exceptions.ServiceUnavailable
    }

    def __init__(self, address, user, password, format='json'):
        # NOTE: base64.b64encode is used instead of the deprecated
        # encodestring, which embedded a trailing newline in the token
        # and therefore corrupted the Authorization header value.
        self.authToken = base64.b64encode("%s:%s" % (user, password))
        self.user = user
        self.passwd = password
        self.address = address
        self.format = format
        if format == 'json':
            self.encode = jsonutils.dumps
        else:
            self.encode = xmldumps

    def request(self, method, uri, params=None):
        """Issue *method* on *uri*; return (header, response) on 2xx.

        *params* is encoded as JSON or XML per the constructor format.
        Raises the mapped VcnsApiException subclass on error statuses.
        """
        uri = self.address + uri
        http = httplib2.Http()
        # vCNS appliances commonly present self-signed certificates.
        http.disable_ssl_certificate_validation = True
        headers = {
            'Content-Type': 'application/' + self.format,
            'Accept': 'application/' + 'json',
            'Authorization': 'Basic ' + self.authToken
        }
        body = self.encode(params) if params else None
        header, response = http.request(uri, method,
                                        body=body, headers=headers)
        status = int(header['status'])
        if 200 <= status < 300:
            return header, response
        if status in self.errors:
            cls = self.errors[status]
        else:
            cls = exceptions.VcnsApiException
        raise cls(uri=uri, status=status, header=header, response=response)
|
@ -1,45 +0,0 @@
|
||||
# Copyright 2013 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# Dictionary keys used when referencing edge and router identifiers.
EDGE_ID = 'edge_id'
ROUTER_ID = 'router_id'

# Interface
# vnic slot indices and names for the edge uplink/downlink interfaces.
EXTERNAL_VNIC_INDEX = 0
INTERNAL_VNIC_INDEX = 1
EXTERNAL_VNIC_NAME = "external"
INTERNAL_VNIC_NAME = "internal"

# Link-local (169.254/16) addressing for the integration segment; the
# netmask below matches the /28 prefix of the LR address.
INTEGRATION_LR_IPADDRESS = "169.254.2.1/28"
INTEGRATION_EDGE_IPADDRESS = "169.254.2.3"
INTEGRATION_SUBNET_NETMASK = "255.255.255.240"

# SNAT rule location (insertion position within the rule list).
PREPEND = 0
APPEND = -1

# error code
# vCNS error code reported when the edge appliance is not running.
VCNS_ERROR_CODE_EDGE_NOT_RUNNING = 10013

# Length of a name suffix — presumably for generated/truncated names;
# TODO(review): confirm against callers.
SUFFIX_LENGTH = 8


# router status by number
class RouterStatus(object):
    # Numeric operational states for a vShield-backed router.
    ROUTER_STATUS_ACTIVE = 0
    ROUTER_STATUS_DOWN = 1
    ROUTER_STATUS_PENDING_CREATE = 2
    ROUTER_STATUS_PENDING_DELETE = 3
    ROUTER_STATUS_ERROR = 4
|
@ -1,68 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.common import exceptions
|
||||
|
||||
|
||||
class VcnsException(exceptions.NeutronException):
    """Base class for all vCNS (vShield) driver exceptions."""
    pass


class VcnsGeneralException(VcnsException):
    """Generic vCNS failure carrying a caller-supplied message."""
    def __init__(self, message):
        # The message is provided verbatim instead of being built from
        # a class-level template.
        self.message = message
        super(VcnsGeneralException, self).__init__()


class VcnsBadRequest(exceptions.BadRequest):
    """Raised for malformed requests sent to the vCNS backend."""
    pass


class VcnsNotFound(exceptions.NotFound):
    """A named vCNS resource could not be found."""
    message = _('%(resource)s not found: %(msg)s')


class VcnsApiException(VcnsException):
    """Base class for errors reported by the vCNS REST API."""
    message = _("An unknown exception %(status)s occurred: %(response)s.")

    def __init__(self, **kwargs):
        super(VcnsApiException, self).__init__(**kwargs)
        # Keep the raw HTTP exchange around for callers and logging.
        self.status = kwargs.get('status')
        self.header = kwargs.get('header')
        self.response = kwargs.get('response')


class ResourceRedirect(VcnsApiException):
    """HTTP 303: the resource has moved."""
    message = _("Resource %(uri)s has been redirected")


class RequestBad(VcnsApiException):
    """HTTP 400: the request was malformed or rejected."""
    message = _("Request %(uri)s is Bad, response %(response)s")


class Forbidden(VcnsApiException):
    """HTTP 403: access to the resource was denied."""
    message = _("Forbidden: %(uri)s")


class ResourceNotFound(VcnsApiException):
    """HTTP 404: the resource does not exist."""
    message = _("Resource %(uri)s not found")


class MediaTypeUnsupport(VcnsApiException):
    """HTTP 415: the request media type is unsupported."""
    message = _("Media Type %(uri)s is not supported")


class ServiceUnavailable(VcnsApiException):
    """HTTP 503: the backend service is unavailable."""
    message = _("Service Unavailable: %(uri)s")
|
@ -1,661 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import excutils
|
||||
|
||||
from neutron.i18n import _LE
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.common import utils
|
||||
from neutron.plugins.vmware.vshield.common import constants as vcns_const
|
||||
from neutron.plugins.vmware.vshield.common import exceptions
|
||||
from neutron.plugins.vmware.vshield.tasks import constants
|
||||
from neutron.plugins.vmware.vshield.tasks import tasks
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class EdgeApplianceDriver(object):
    """Driver managing vShield Edge appliances through the VCNS API.

    Most operations are modelled as asynchronous ``tasks.Task`` objects
    queued on ``self.task_manager``.  The concrete user of this mixin is
    expected to provide ``self.vcns`` (the VCNS API client),
    ``self.callbacks`` and the placement attributes (``datacenter_moid``,
    ``deployment_container_id``, ``resource_pool_id``, ``datastore_id``,
    ``external_network``) -- none of them are set here.
    """

    def __init__(self):
        # store the last task per edge that has the latest config
        self.updated_task = {
            'nat': {},
            'route': {},
        }

    def _assemble_edge(self, name, appliance_size="compact",
                       deployment_container_id=None, datacenter_moid=None,
                       enable_aesni=True, hypervisor_assist=False,
                       enable_fips=False, remote_access=False):
        """Build the edge deployment request body (a plain dict)."""
        edge = {
            'name': name,
            'fqdn': name,
            'hypervisorAssist': hypervisor_assist,
            'type': 'gatewayServices',
            'enableAesni': enable_aesni,
            'enableFips': enable_fips,
            'cliSettings': {
                'remoteAccess': remote_access
            },
            'appliances': {
                'applianceSize': appliance_size
            },
            'vnics': {
                'vnics': []
            }
        }
        if deployment_container_id:
            edge['appliances']['deploymentContainerId'] = (
                deployment_container_id)
        if datacenter_moid:
            edge['datacenterMoid'] = datacenter_moid

        return edge

    def _assemble_edge_appliance(self, resource_pool_id, datastore_id):
        """Build the appliance placement sub-document; may be empty."""
        appliance = {}
        if resource_pool_id:
            appliance['resourcePoolId'] = resource_pool_id
        if datastore_id:
            appliance['datastoreId'] = datastore_id
        return appliance

    def _assemble_edge_vnic(self, name, index, portgroup_id,
                            primary_address=None, subnet_mask=None,
                            secondary=None,
                            type="internal",
                            enable_proxy_arp=False,
                            enable_send_redirects=True,
                            is_connected=True,
                            mtu=1500):
        """Build one vnic sub-document for an edge request.

        The address group is only attached when both ``primary_address``
        and ``subnet_mask`` are provided.
        """
        vnic = {
            'index': index,
            'name': name,
            'type': type,
            'portgroupId': portgroup_id,
            'mtu': mtu,
            'enableProxyArp': enable_proxy_arp,
            'enableSendRedirects': enable_send_redirects,
            'isConnected': is_connected
        }
        if primary_address and subnet_mask:
            address_group = {
                'primaryAddress': primary_address,
                'subnetMask': subnet_mask
            }
            if secondary:
                address_group['secondaryAddresses'] = {
                    'ipAddress': secondary,
                    'type': 'IpAddressesDto'
                }

            vnic['addressGroups'] = {
                'addressGroups': [address_group]
            }

        return vnic

    def _edge_status_to_level(self, status):
        """Map a VCNS edge status string to a RouterStatus level."""
        if status == 'GREEN':
            status_level = vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE
        elif status in ('GREY', 'YELLOW'):
            status_level = vcns_const.RouterStatus.ROUTER_STATUS_DOWN
        else:
            status_level = vcns_const.RouterStatus.ROUTER_STATUS_ERROR
        return status_level

    def _enable_loadbalancer(self, edge):
        """Append an enabled loadbalancer feature to the edge request."""
        if not edge.get('featureConfigs') or (
            not edge['featureConfigs'].get('features')):
            edge['featureConfigs'] = {'features': []}
        edge['featureConfigs']['features'].append(
            {'featureType': 'loadbalancer_4.0',
             'enabled': True})

    def get_edge_status(self, edge_id):
        """Query VCNS and return the RouterStatus level for one edge."""
        try:
            response = self.vcns.get_edge_status(edge_id)[1]
            status_level = self._edge_status_to_level(
                response['edgeStatus'])
        except exceptions.VcnsApiException as e:
            LOG.exception(_LE("VCNS: Failed to get edge status:\n%s"),
                          e.response)
            status_level = vcns_const.RouterStatus.ROUTER_STATUS_ERROR
            try:
                # A "not running" error code downgrades ERROR to DOWN.
                desc = jsonutils.loads(e.response)
                if desc.get('errorCode') == (
                    vcns_const.VCNS_ERROR_CODE_EDGE_NOT_RUNNING):
                    status_level = (
                        vcns_const.RouterStatus.ROUTER_STATUS_DOWN)
            except ValueError:
                # Response body is not JSON; keep the ERROR level.
                LOG.exception(e.response)

        return status_level

    def get_edges_statuses(self):
        """Return a {edge_id: RouterStatus level} map for all edges."""
        edges_status_level = {}
        edges = self._get_edges()
        for edge in edges['edgePage'].get('data', []):
            edge_id = edge['id']
            status = edge['edgeStatus']
            edges_status_level[edge_id] = self._edge_status_to_level(status)

        return edges_status_level

    def _update_interface(self, task):
        """Task callback: push a vnic config to VCNS; re-raises on error."""
        edge_id = task.userdata['edge_id']
        config = task.userdata['config']
        LOG.debug("VCNS: start updating vnic %s", config)
        try:
            self.vcns.update_interface(edge_id, config)
        except exceptions.VcnsApiException as e:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("VCNS: Failed to update vnic %(config)s:\n"
                                  "%(response)s"), {
                                      'config': config,
                                      'response': e.response})
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("VCNS: Failed to update vnic %d"),
                              config['index'])

        return constants.TaskStatus.COMPLETED

    def update_interface(self, router_id, edge_id, index, network,
                         address=None, netmask=None, secondary=None,
                         jobdata=None):
        """Queue a task updating one of the two supported edge vnics.

        Only the external and internal vnic indices are supported;
        anything else raises VcnsGeneralException.
        """
        LOG.debug("VCNS: update vnic %(index)d: %(addr)s %(netmask)s", {
            'index': index, 'addr': address, 'netmask': netmask})
        if index == vcns_const.EXTERNAL_VNIC_INDEX:
            name = vcns_const.EXTERNAL_VNIC_NAME
            intf_type = 'uplink'
        elif index == vcns_const.INTERNAL_VNIC_INDEX:
            name = vcns_const.INTERNAL_VNIC_NAME
            intf_type = 'internal'
        else:
            msg = _("Vnic %d currently not supported") % index
            raise exceptions.VcnsGeneralException(msg)

        config = self._assemble_edge_vnic(
            name, index, network, address, netmask, secondary, type=intf_type)

        userdata = {
            'edge_id': edge_id,
            'config': config,
            'jobdata': jobdata
        }
        task_name = "update-interface-%s-%d" % (edge_id, index)
        task = tasks.Task(task_name, router_id,
                          self._update_interface, userdata=userdata)
        task.add_result_monitor(self.callbacks.interface_update_result)
        self.task_manager.add(task)
        return task

    def _deploy_edge(self, task):
        """Task callback: submit an edge deployment job to VCNS."""
        userdata = task.userdata
        name = userdata['router_name']
        LOG.debug("VCNS: start deploying edge %s", name)
        request = userdata['request']
        try:
            header = self.vcns.deploy_edge(request)[0]
            # The job id is the last path segment of the Location header.
            objuri = header['location']
            job_id = objuri[objuri.rfind("/") + 1:]
            response = self.vcns.get_edge_id(job_id)[1]
            edge_id = response['edgeId']
            LOG.debug("VCNS: deploying edge %s", edge_id)
            userdata['edge_id'] = edge_id
            status = constants.TaskStatus.PENDING
        except exceptions.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("VCNS: deploy edge failed for router %s."),
                              name)

        # Only reached on success; the except branch re-raises.
        return status

    def _status_edge(self, task):
        """Task callback: poll the deployment status of an edge."""
        edge_id = task.userdata['edge_id']
        try:
            response = self.vcns.get_edge_deploy_status(edge_id)[1]
            task.userdata['retries'] = 0
            system_status = response.get('systemStatus', None)
            if system_status is None:
                status = constants.TaskStatus.PENDING
            elif system_status == 'good':
                status = constants.TaskStatus.COMPLETED
            else:
                status = constants.TaskStatus.ERROR
        except exceptions.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("VCNS: Edge %s status query failed."),
                              edge_id)
        except Exception:
            # Tolerate up to three consecutive unexpected failures
            # before giving up on the status poll.
            retries = task.userdata.get('retries', 0) + 1
            if retries < 3:
                task.userdata['retries'] = retries
                LOG.exception(_LE("VCNS: Unable to retrieve edge %(edge_id)s "
                                  "status. Retry %(retries)d."),
                              {'edge_id': edge_id,
                               'retries': retries})
                status = constants.TaskStatus.PENDING
            else:
                LOG.exception(_LE("VCNS: Unable to retrieve edge %s status. "
                                  "Abort."), edge_id)
                status = constants.TaskStatus.ERROR
        LOG.debug("VCNS: Edge %s status", edge_id)
        return status

    def _result_edge(self, task):
        """Task callback: log the final result of an edge deployment."""
        router_name = task.userdata['router_name']
        edge_id = task.userdata.get('edge_id')
        if task.status != constants.TaskStatus.COMPLETED:
            LOG.error(_LE("VCNS: Failed to deploy edge %(edge_id)s "
                          "for %(name)s, status %(status)d"), {
                              'edge_id': edge_id,
                              'name': router_name,
                              'status': task.status
                          })
        else:
            LOG.debug("VCNS: Edge %(edge_id)s deployed for "
                      "router %(name)s", {
                          'edge_id': edge_id, 'name': router_name
                      })

    def _delete_edge(self, task):
        """Task callback: destroy an edge; a missing edge is a success."""
        edge_id = task.userdata['edge_id']
        LOG.debug("VCNS: start destroying edge %s", edge_id)
        status = constants.TaskStatus.COMPLETED
        if edge_id:
            try:
                self.vcns.delete_edge(edge_id)
            except exceptions.ResourceNotFound:
                # Already gone -- treat as successfully deleted.
                pass
            except exceptions.VcnsApiException as e:
                LOG.exception(_LE("VCNS: Failed to delete %(edge_id)s:\n"
                                  "%(response)s"),
                              {'edge_id': edge_id, 'response': e.response})
                status = constants.TaskStatus.ERROR
            except Exception:
                LOG.exception(_LE("VCNS: Failed to delete %s"), edge_id)
                status = constants.TaskStatus.ERROR

        return status

    def _get_edges(self):
        """Return the raw edge listing from VCNS; re-raises API errors."""
        try:
            return self.vcns.get_edges()[1]
        except exceptions.VcnsApiException as e:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("VCNS: Failed to get edges:\n%s"),
                              e.response)

    def deploy_edge(self, router_id, name, internal_network, jobdata=None,
                    wait_for_exec=False, loadbalancer_enable=True):
        """Queue an edge deployment task for *router_id*.

        When ``wait_for_exec`` is True, block until the task has been
        executed so that the edge id is available in its userdata.
        """
        task_name = 'deploying-%s' % name
        edge_name = name
        edge = self._assemble_edge(
            edge_name, datacenter_moid=self.datacenter_moid,
            deployment_container_id=self.deployment_container_id,
            appliance_size='large', remote_access=True)
        appliance = self._assemble_edge_appliance(self.resource_pool_id,
                                                  self.datastore_id)
        if appliance:
            edge['appliances']['appliances'] = [appliance]

        vnic_external = self._assemble_edge_vnic(
            vcns_const.EXTERNAL_VNIC_NAME, vcns_const.EXTERNAL_VNIC_INDEX,
            self.external_network, type="uplink")
        edge['vnics']['vnics'].append(vnic_external)
        vnic_inside = self._assemble_edge_vnic(
            vcns_const.INTERNAL_VNIC_NAME, vcns_const.INTERNAL_VNIC_INDEX,
            internal_network,
            vcns_const.INTEGRATION_EDGE_IPADDRESS,
            vcns_const.INTEGRATION_SUBNET_NETMASK,
            type="internal")
        edge['vnics']['vnics'].append(vnic_inside)
        if loadbalancer_enable:
            self._enable_loadbalancer(edge)
        userdata = {
            'request': edge,
            'router_name': name,
            'jobdata': jobdata
        }
        task = tasks.Task(task_name, router_id,
                          self._deploy_edge,
                          status_callback=self._status_edge,
                          result_callback=self._result_edge,
                          userdata=userdata)
        task.add_executed_monitor(self.callbacks.edge_deploy_started)
        task.add_result_monitor(self.callbacks.edge_deploy_result)
        self.task_manager.add(task)

        if wait_for_exec:
            # wait until the deploy task is executed so edge_id is available
            task.wait(constants.TaskState.EXECUTED)

        return task

    def delete_edge(self, router_id, edge_id, jobdata=None):
        """Queue a task deleting the given edge appliance."""
        task_name = 'delete-%s' % edge_id
        userdata = {
            'router_id': router_id,
            'edge_id': edge_id,
            'jobdata': jobdata
        }
        task = tasks.Task(task_name, router_id, self._delete_edge,
                          userdata=userdata)
        task.add_result_monitor(self.callbacks.edge_delete_result)
        self.task_manager.add(task)
        return task

    def _assemble_nat_rule(self, action, original_address,
                           translated_address,
                           vnic_index=vcns_const.EXTERNAL_VNIC_INDEX,
                           enabled=True):
        """Build a single NAT rule dict for the VCNS API."""
        nat_rule = {}
        nat_rule['action'] = action
        nat_rule['vnic'] = vnic_index
        nat_rule['originalAddress'] = original_address
        nat_rule['translatedAddress'] = translated_address
        nat_rule['enabled'] = enabled
        return nat_rule

    def get_nat_config(self, edge_id):
        """Return the current NAT config of an edge; re-raises API errors."""
        try:
            return self.vcns.get_nat_config(edge_id)[1]
        except exceptions.VcnsApiException as e:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("VCNS: Failed to get nat config:\n%s"),
                              e.response)

    def _create_nat_rule(self, task):
        """Task callback: insert one NAT rule into the edge NAT config."""
        # TODO(fank): use POST for optimization
        # return rule_id for future reference
        rule = task.userdata['rule']
        LOG.debug("VCNS: start creating nat rules: %s", rule)
        edge_id = task.userdata['edge_id']
        nat = self.get_nat_config(edge_id)
        location = task.userdata['location']

        # The version field must not be echoed back on update.
        del nat['version']

        if location is None or location == vcns_const.APPEND:
            nat['rules']['natRulesDtos'].append(rule)
        else:
            nat['rules']['natRulesDtos'].insert(location, rule)

        try:
            self.vcns.update_nat_config(edge_id, nat)
            status = constants.TaskStatus.COMPLETED
        except exceptions.VcnsApiException as e:
            LOG.exception(_LE("VCNS: Failed to create snat rule:\n%s"),
                          e.response)
            status = constants.TaskStatus.ERROR

        return status

    def create_snat_rule(self, router_id, edge_id, src, translated,
                         jobdata=None, location=None):
        """Queue a task adding an SNAT rule to the edge."""
        LOG.debug("VCNS: create snat rule %(src)s/%(translated)s", {
            'src': src, 'translated': translated})
        snat_rule = self._assemble_nat_rule("snat", src, translated)
        userdata = {
            'router_id': router_id,
            'edge_id': edge_id,
            'rule': snat_rule,
            'location': location,
            'jobdata': jobdata
        }
        task_name = "create-snat-%s-%s-%s" % (edge_id, src, translated)
        task = tasks.Task(task_name, router_id, self._create_nat_rule,
                          userdata=userdata)
        task.add_result_monitor(self.callbacks.snat_create_result)
        self.task_manager.add(task)
        return task

    def _delete_nat_rule(self, task):
        """Task callback: delete every NAT rule matching an address."""
        # TODO(fank): pass in rule_id for optimization
        # handle routes update for optimization
        edge_id = task.userdata['edge_id']
        address = task.userdata['address']
        addrtype = task.userdata['addrtype']
        LOG.debug("VCNS: start deleting %(type)s rules: %(addr)s", {
            'type': addrtype, 'addr': address})
        nat = self.get_nat_config(edge_id)
        del nat['version']
        status = constants.TaskStatus.COMPLETED
        for nat_rule in nat['rules']['natRulesDtos']:
            if nat_rule[addrtype] == address:
                rule_id = nat_rule['ruleId']
                try:
                    self.vcns.delete_nat_rule(edge_id, rule_id)
                except exceptions.VcnsApiException as e:
                    LOG.exception(_LE("VCNS: Failed to delete snat rule:\n"
                                      "%s"), e.response)
                    status = constants.TaskStatus.ERROR

        return status

    def delete_snat_rule(self, router_id, edge_id, src, jobdata=None):
        """Queue a task deleting SNAT rules whose original address is src."""
        LOG.debug("VCNS: delete snat rule %s", src)
        userdata = {
            'edge_id': edge_id,
            'address': src,
            'addrtype': 'originalAddress',
            'jobdata': jobdata
        }
        task_name = "delete-snat-%s-%s" % (edge_id, src)
        task = tasks.Task(task_name, router_id, self._delete_nat_rule,
                          userdata=userdata)
        task.add_result_monitor(self.callbacks.snat_delete_result)
        self.task_manager.add(task)
        return task

    def create_dnat_rule(self, router_id, edge_id, dst, translated,
                         jobdata=None, location=None):
        """Queue a task adding a DNAT rule to the edge."""
        # TODO(fank): use POST for optimization
        # return rule_id for future reference
        LOG.debug("VCNS: create dnat rule %(dst)s/%(translated)s", {
            'dst': dst, 'translated': translated})
        dnat_rule = self._assemble_nat_rule(
            "dnat", dst, translated)
        userdata = {
            'router_id': router_id,
            'edge_id': edge_id,
            'rule': dnat_rule,
            'location': location,
            'jobdata': jobdata
        }
        task_name = "create-dnat-%s-%s-%s" % (edge_id, dst, translated)
        task = tasks.Task(task_name, router_id, self._create_nat_rule,
                          userdata=userdata)
        task.add_result_monitor(self.callbacks.dnat_create_result)
        self.task_manager.add(task)
        return task

    def delete_dnat_rule(self, router_id, edge_id, translated,
                         jobdata=None):
        """Queue a task deleting DNAT rules by translated address."""
        # TODO(fank): pass in rule_id for optimization
        LOG.debug("VCNS: delete dnat rule %s", translated)
        userdata = {
            'edge_id': edge_id,
            'address': translated,
            'addrtype': 'translatedAddress',
            'jobdata': jobdata
        }
        task_name = "delete-dnat-%s-%s" % (edge_id, translated)
        task = tasks.Task(task_name, router_id, self._delete_nat_rule,
                          userdata=userdata)
        task.add_result_monitor(self.callbacks.dnat_delete_result)
        self.task_manager.add(task)
        return task

    def _update_nat_rule(self, task):
        """Task callback: replace the edge's whole NAT rule set."""
        # TODO(fank): use POST for optimization
        # return rule_id for future reference
        edge_id = task.userdata['edge_id']
        if task != self.updated_task['nat'][edge_id]:
            # this task does not have the latest config, abort now
            # for speedup
            return constants.TaskStatus.ABORT

        rules = task.userdata['rules']
        LOG.debug("VCNS: start updating nat rules: %s", rules)

        nat = {
            'featureType': 'nat',
            'rules': {
                'natRulesDtos': rules
            }
        }

        try:
            self.vcns.update_nat_config(edge_id, nat)
            status = constants.TaskStatus.COMPLETED
        except exceptions.VcnsApiException as e:
            LOG.exception(_LE("VCNS: Failed to create snat rule:\n%s"),
                          e.response)
            status = constants.TaskStatus.ERROR

        return status

    def update_nat_rules(self, router_id, edge_id, snats, dnats,
                         jobdata=None):
        """Queue a task rewriting all NAT rules; each DNAT also gets a
        reverse SNAT rule."""
        LOG.debug("VCNS: update nat rule\n"
                  "SNAT:%(snat)s\n"
                  "DNAT:%(dnat)s\n", {
                      'snat': snats, 'dnat': dnats})
        nat_rules = []

        for dnat in dnats:
            nat_rules.append(self._assemble_nat_rule(
                'dnat', dnat['dst'], dnat['translated']))
            nat_rules.append(self._assemble_nat_rule(
                'snat', dnat['translated'], dnat['dst']))

        for snat in snats:
            nat_rules.append(self._assemble_nat_rule(
                'snat', snat['src'], snat['translated']))

        userdata = {
            'edge_id': edge_id,
            'rules': nat_rules,
            'jobdata': jobdata,
        }
        task_name = "update-nat-%s" % edge_id
        task = tasks.Task(task_name, router_id, self._update_nat_rule,
                          userdata=userdata)
        task.add_result_monitor(self.callbacks.nat_update_result)
        # Remember the task carrying the latest config so stale queued
        # updates can abort themselves.
        self.updated_task['nat'][edge_id] = task
        self.task_manager.add(task)
        return task

    def _update_routes(self, task):
        """Task callback: replace the edge's static routes and gateway."""
        edge_id = task.userdata['edge_id']
        if (task != self.updated_task['route'][edge_id] and
            task.userdata.get('skippable', True)):
            # this task does not have the latest config, abort now
            # for speedup
            return constants.TaskStatus.ABORT
        gateway = task.userdata['gateway']
        routes = task.userdata['routes']
        LOG.debug("VCNS: start updating routes for %s", edge_id)
        static_routes = []
        for route in routes:
            static_routes.append({
                "description": "",
                "vnic": vcns_const.INTERNAL_VNIC_INDEX,
                "network": route['cidr'],
                "nextHop": route['nexthop']
            })
        request = {
            "staticRoutes": {
                "staticRoutes": static_routes
            }
        }
        if gateway:
            request["defaultRoute"] = {
                "description": "default-gateway",
                "gatewayAddress": gateway,
                "vnic": vcns_const.EXTERNAL_VNIC_INDEX
            }
        try:
            self.vcns.update_routes(edge_id, request)
            status = constants.TaskStatus.COMPLETED
        except exceptions.VcnsApiException as e:
            LOG.exception(_LE("VCNS: Failed to update routes:\n%s"),
                          e.response)
            status = constants.TaskStatus.ERROR

        return status

    def update_routes(self, router_id, edge_id, gateway, routes,
                      skippable=True, jobdata=None):
        """Queue a task updating the routes of an edge."""
        if gateway:
            # Strip a CIDR prefix length if one was supplied.
            gateway = gateway.split('/')[0]

        userdata = {
            'edge_id': edge_id,
            'gateway': gateway,
            'routes': routes,
            'skippable': skippable,
            'jobdata': jobdata
        }
        task_name = "update-routes-%s" % (edge_id)
        task = tasks.Task(task_name, router_id, self._update_routes,
                          userdata=userdata)
        task.add_result_monitor(self.callbacks.routes_update_result)
        self.updated_task['route'][edge_id] = task
        self.task_manager.add(task)
        return task

    def create_lswitch(self, name, tz_config, tags=None,
                       port_isolation=False, replication_mode="service"):
        """Create an NSX logical switch and return the API response body."""
        lsconfig = {
            'display_name': utils.check_and_truncate(name),
            "tags": tags or [],
            "type": "LogicalSwitchConfig",
            "_schema": "/ws.v1/schema/LogicalSwitchConfig",
            "transport_zones": tz_config
        }
        # Bug fix: the original tested ``port_isolation is bool``, which
        # compares the value against the *type object* and is always
        # False, so port_isolation_enabled was silently never set.
        if isinstance(port_isolation, bool):
            lsconfig["port_isolation_enabled"] = port_isolation
        if replication_mode:
            lsconfig["replication_mode"] = replication_mode

        response = self.vcns.create_lswitch(lsconfig)[1]
        return response

    def delete_lswitch(self, lswitch_id):
        """Delete a logical switch by id (synchronous API call)."""
        self.vcns.delete_lswitch(lswitch_id)

    def get_loadbalancer_config(self, edge_id):
        """Return the loadbalancer config of an edge; re-raises API errors."""
        try:
            header, response = self.vcns.get_loadbalancer_config(
                edge_id)
        except exceptions.VcnsApiException:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Failed to get service config"))
        return response

    def enable_service_loadbalancer(self, edge_id):
        """Enable the loadbalancer service on the edge if not yet enabled."""
        config = self.get_loadbalancer_config(
            edge_id)
        if not config['enabled']:
            config['enabled'] = True
            try:
                self.vcns.enable_service_loadbalancer(edge_id, config)
            except exceptions.VcnsApiException:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Failed to enable loadbalancer "
                                      "service config"))
|
@ -1,44 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
class TaskStatus(object):
    """Task running status.

    This is used by execution/status callback function to notify the
    task manager what's the status of current task, and also used for
    indication the final task execution result.
    """
    PENDING = 1    # still executing / awaiting periodic status checks
    COMPLETED = 2  # finished successfully
    ERROR = 3      # finished with an error
    ABORT = 4      # cancelled before completion
|
||||
|
||||
|
||||
class TaskState(object):
    """Current state of a task.

    This is to keep track of the current state of a task.
    NONE: the task is still in the queue
    START: the task is pull out from the queue and is about to be executed
    EXECUTED: the task has been executed
    STATUS: we're running periodic status check for this task
    RESULT: the task has finished and result is ready
    """
    # Values are ordered so that state progression can be compared with
    # ``<``/``>`` (see Task.wait and Task._move_state).
    NONE = -1
    START = 0
    EXECUTED = 1
    STATUS = 2
    RESULT = 3
|
@ -1,394 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import collections
|
||||
import uuid
|
||||
|
||||
from eventlet import event
|
||||
from eventlet import greenthread
|
||||
|
||||
from neutron.common import exceptions
|
||||
from neutron.i18n import _LE, _LI
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.openstack.common import loopingcall
|
||||
from neutron.plugins.vmware.vshield.tasks import constants
|
||||
|
||||
DEFAULT_INTERVAL = 1000
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def nop(task):
    """Default task callback: do nothing and report success."""
    return constants.TaskStatus.COMPLETED
|
||||
|
||||
|
||||
class TaskException(exceptions.NeutronException):
    """Base exception for task-manager errors.

    An explicit *message* overrides the class-level template before the
    NeutronException constructor interpolates **kwargs into it.
    """

    def __init__(self, message=None, **kwargs):
        if message is not None:
            self.message = message

        super(TaskException, self).__init__(**kwargs)
|
||||
|
||||
|
||||
class InvalidState(TaskException):
    """Raised when waiting on a state that cannot be waited on."""
    message = _("Invalid state %(state)d")
|
||||
|
||||
|
||||
class TaskStateSkipped(TaskException):
    """Sent to waiters whose awaited state was jumped over."""
    message = _("State %(state)d skipped. Current state %(current)d")
|
||||
|
||||
|
||||
class Task(object):
    """A unit of asynchronous work processed by the TaskManager.

    A task advances through the TaskState machine (NONE -> START ->
    EXECUTED -> RESULT); monitor callbacks registered per state are
    invoked as each state is reached, and greenthreads blocked in
    wait() are woken through eventlet events.
    """
    def __init__(self, name, resource_id, execute_callback,
                 status_callback=nop, result_callback=nop, userdata=None):
        self.name = name
        self.resource_id = resource_id
        self._execute_callback = execute_callback
        self._status_callback = status_callback
        self._result_callback = result_callback
        self.userdata = userdata
        # id is None until set externally -- presumably by the task
        # manager; confirm against TaskManager.add (not in view).
        self.id = None
        # Latest TaskStatus value; None until first _update_status.
        self.status = None

        # Monitor callbacks keyed by the TaskState that triggers them.
        self._monitors = {
            constants.TaskState.START: [],
            constants.TaskState.EXECUTED: [],
            constants.TaskState.RESULT: []
        }
        # Per-state eventlet events for wait(); indexed by TaskState
        # (START..RESULT).  None means nobody is waiting on that state.
        self._states = [None, None, None, None]
        self._state = constants.TaskState.NONE

    def _add_monitor(self, action, func):
        """Register *func* to run when state *action* is reached."""
        self._monitors[action].append(func)
        return self

    def _move_state(self, state):
        """Advance to *state*, waking its waiters.

        Waiters on any earlier (skipped) state are woken with a
        TaskStateSkipped exception instead of being left blocked.
        """
        self._state = state
        if self._states[state] is not None:
            e = self._states[state]
            self._states[state] = None
            e.send()

        for s in range(state):
            if self._states[s] is not None:
                e = self._states[s]
                self._states[s] = None
                e.send_exception(
                    TaskStateSkipped(state=s, current=self._state))

    def _invoke_monitor(self, state):
        """Run all monitors for *state* (errors are logged, not raised),
        then move the task to that state."""
        for func in self._monitors[state]:
            try:
                func(self)
            except Exception:
                LOG.exception(_LE("Task %(task)s encountered exception in "
                                  "%(func)s at state %(state)s"),
                              {'task': str(self),
                               'func': str(func),
                               'state': state})

        self._move_state(state)

        return self

    def _start(self):
        """Enter the START state."""
        return self._invoke_monitor(constants.TaskState.START)

    def _executed(self):
        """Enter the EXECUTED state."""
        return self._invoke_monitor(constants.TaskState.EXECUTED)

    def _update_status(self, status):
        """Record *status* if it changed.

        NOTE(review): returns self only on the no-change path and None
        otherwise; callers in view ignore the return value.
        """
        if self.status == status:
            return self

        self.status = status

    def _finished(self):
        """Enter the RESULT state."""
        return self._invoke_monitor(constants.TaskState.RESULT)

    def add_start_monitor(self, func):
        """Run *func* when the task starts executing."""
        return self._add_monitor(constants.TaskState.START, func)

    def add_executed_monitor(self, func):
        """Run *func* right after the execute callback returns."""
        return self._add_monitor(constants.TaskState.EXECUTED, func)

    def add_result_monitor(self, func):
        """Run *func* when the task result is ready."""
        return self._add_monitor(constants.TaskState.RESULT, func)

    def wait(self, state):
        """Block the calling greenthread until *state* is reached.

        Raises InvalidState for unwaitable states; raises
        TaskStateSkipped if the task jumps past *state*.
        """
        if (state < constants.TaskState.START or
            state > constants.TaskState.RESULT or
            state == constants.TaskState.STATUS):
            raise InvalidState(state=state)

        if state <= self._state:
            # we already passed this current state, so no wait
            return

        e = event.Event()
        self._states[state] = e
        e.wait()

    def __repr__(self):
        return "Task-%s-%s-%s" % (
            self.name, self.resource_id, self.id)
|
||||
|
||||
|
||||
class TaskManager(object):
|
||||
|
||||
_instance = None
|
||||
_default_interval = DEFAULT_INTERVAL
|
||||
|
||||
    def __init__(self, interval=None):
        """Create a task manager; *interval* defaults to DEFAULT_INTERVAL."""
        self._interval = interval or TaskManager._default_interval

        # A queue to pass tasks from other threads
        self._tasks_queue = collections.deque()

        # A dict to store resource -> resource's tasks
        self._tasks = {}

        # Current task being executed in main thread
        self._main_thread_exec_task = None

        # New request event
        self._req = event.Event()

        # TaskHandler stopped event
        self._stopped = False

        # Periodic function trigger
        self._monitor = None
        self._monitor_busy = False

        # Thread handling the task request
        self._thread = None
|
||||
|
||||
    def _execute(self, task):
        """Execute task.

        Runs START monitors, invokes the execute callback (exceptions
        become TaskStatus.ERROR), records the status and runs EXECUTED
        monitors.  Returns the resulting TaskStatus.
        """
        LOG.debug("Start task %s", str(task))
        task._start()
        try:
            status = task._execute_callback(task)
        except Exception:
            LOG.exception(_LE("Task %(task)s encountered exception in %(cb)s"),
                          {'task': str(task),
                           'cb': str(task._execute_callback)})
            status = constants.TaskStatus.ERROR

        LOG.debug("Task %(task)s return %(status)s", {
            'task': str(task),
            'status': status})

        task._update_status(status)
        task._executed()

        return status
|
||||
|
||||
    def _result(self, task):
        """Notify task execution result.

        Invokes the result callback (exceptions are logged, not raised)
        and then fires the task's RESULT monitors.
        """
        try:
            task._result_callback(task)
        except Exception:
            LOG.exception(_LE("Task %(task)s encountered exception in %(cb)s"),
                          {'task': str(task),
                           'cb': str(task._result_callback)})

        LOG.debug("Task %(task)s return %(status)s",
                  {'task': str(task), 'status': task.status})

        task._finished()
|
||||
|
||||
def _check_pending_tasks(self):
|
||||
"""Check all pending tasks status."""
|
||||
for resource_id in self._tasks.keys():
|
||||
if self._stopped:
|
||||
# Task manager is stopped, return now
|
||||
return
|
||||
|
||||
tasks = self._tasks[resource_id]
|
||||
# only the first task is executed and pending
|
||||
task = tasks[0]
|
||||
try:
|
||||
status = task._status_callback(task)
|
||||
except Exception:
|
||||
LOG.exception(_LE("Task %(task)s encountered exception in "
|
||||
"%(cb)s"),
|
||||
{'task': str(task),
|
||||
'cb': str(task._status_callback)})
|
||||
status = constants.TaskStatus.ERROR
|
||||
task._update_status(status)
|
||||
if status != constants.TaskStatus.PENDING:
|
||||
self._dequeue(task, True)
|
||||
|
||||
def _enqueue(self, task):
|
||||
if task.resource_id in self._tasks:
|
||||
# append to existing resource queue for ordered processing
|
||||
self._tasks[task.resource_id].append(task)
|
||||
else:
|
||||
# put the task to a new resource queue
|
||||
tasks = collections.deque()
|
||||
tasks.append(task)
|
||||
self._tasks[task.resource_id] = tasks
|
||||
|
||||
def _dequeue(self, task, run_next):
|
||||
self._result(task)
|
||||
tasks = self._tasks[task.resource_id]
|
||||
tasks.remove(task)
|
||||
if not tasks:
|
||||
# no more tasks for this resource
|
||||
del self._tasks[task.resource_id]
|
||||
return
|
||||
|
||||
if run_next:
|
||||
# process next task for this resource
|
||||
while tasks:
|
||||
task = tasks[0]
|
||||
status = self._execute(task)
|
||||
if status == constants.TaskStatus.PENDING:
|
||||
break
|
||||
self._dequeue(task, False)
|
||||
|
||||
def _abort(self):
|
||||
"""Abort all tasks."""
|
||||
# put all tasks haven't been received by main thread to queue
|
||||
# so the following abort handling can cover them
|
||||
for t in self._tasks_queue:
|
||||
self._enqueue(t)
|
||||
self._tasks_queue.clear()
|
||||
|
||||
for resource_id in self._tasks.keys():
|
||||
tasks = list(self._tasks[resource_id])
|
||||
for task in tasks:
|
||||
task._update_status(constants.TaskStatus.ABORT)
|
||||
self._dequeue(task, False)
|
||||
|
||||
def _get_task(self):
|
||||
"""Get task request."""
|
||||
while True:
|
||||
for t in self._tasks_queue:
|
||||
return self._tasks_queue.popleft()
|
||||
self._req.wait()
|
||||
self._req.reset()
|
||||
|
||||
def run(self):
|
||||
while True:
|
||||
try:
|
||||
if self._stopped:
|
||||
# Gracefully terminate this thread if the _stopped
|
||||
# attribute was set to true
|
||||
LOG.info(_LI("Stopping TaskManager"))
|
||||
break
|
||||
|
||||
# get a task from queue, or timeout for periodic status check
|
||||
task = self._get_task()
|
||||
if task.resource_id in self._tasks:
|
||||
# this resource already has some tasks under processing,
|
||||
# append the task to same queue for ordered processing
|
||||
self._enqueue(task)
|
||||
continue
|
||||
|
||||
try:
|
||||
self._main_thread_exec_task = task
|
||||
self._execute(task)
|
||||
finally:
|
||||
self._main_thread_exec_task = None
|
||||
if task.status is None:
|
||||
# The thread is killed during _execute(). To guarantee
|
||||
# the task been aborted correctly, put it to the queue.
|
||||
self._enqueue(task)
|
||||
elif task.status != constants.TaskStatus.PENDING:
|
||||
self._result(task)
|
||||
else:
|
||||
self._enqueue(task)
|
||||
except Exception:
|
||||
LOG.exception(_LE("TaskManager terminating because "
|
||||
"of an exception"))
|
||||
break
|
||||
|
||||
def add(self, task):
|
||||
task.id = uuid.uuid1()
|
||||
self._tasks_queue.append(task)
|
||||
if not self._req.ready():
|
||||
self._req.send()
|
||||
return task.id
|
||||
|
||||
def stop(self):
|
||||
if self._thread is None:
|
||||
return
|
||||
self._stopped = True
|
||||
self._thread.kill()
|
||||
self._thread = None
|
||||
# Stop looping call and abort running tasks
|
||||
self._monitor.stop()
|
||||
if self._monitor_busy:
|
||||
self._monitor.wait()
|
||||
self._abort()
|
||||
LOG.info(_LI("TaskManager terminated"))
|
||||
|
||||
def has_pending_task(self):
|
||||
if self._tasks_queue or self._tasks or self._main_thread_exec_task:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def show_pending_tasks(self):
|
||||
for task in self._tasks_queue:
|
||||
LOG.info(str(task))
|
||||
for resource, tasks in self._tasks.iteritems():
|
||||
for task in tasks:
|
||||
LOG.info(str(task))
|
||||
if self._main_thread_exec_task:
|
||||
LOG.info(str(self._main_thread_exec_task))
|
||||
|
||||
def count(self):
|
||||
count = 0
|
||||
for resource_id, tasks in self._tasks.iteritems():
|
||||
count += len(tasks)
|
||||
return count
|
||||
|
||||
def start(self, interval=None):
|
||||
def _inner():
|
||||
self.run()
|
||||
|
||||
def _loopingcall_callback():
|
||||
self._monitor_busy = True
|
||||
try:
|
||||
self._check_pending_tasks()
|
||||
except Exception:
|
||||
LOG.exception(_LE("Exception in _check_pending_tasks"))
|
||||
self._monitor_busy = False
|
||||
|
||||
if self._thread is not None:
|
||||
return self
|
||||
|
||||
if interval is None or interval == 0:
|
||||
interval = self._interval
|
||||
|
||||
self._stopped = False
|
||||
self._thread = greenthread.spawn(_inner)
|
||||
self._monitor = loopingcall.FixedIntervalLoopingCall(
|
||||
_loopingcall_callback)
|
||||
self._monitor.start(interval / 1000.0,
|
||||
interval / 1000.0)
|
||||
# To allow the created thread start running
|
||||
greenthread.sleep(0)
|
||||
|
||||
return self
|
||||
|
||||
@classmethod
|
||||
def set_default_interval(cls, interval):
|
||||
cls._default_interval = interval
|
@ -1,303 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.vshield.common import VcnsApiClient
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
# HTTP verbs used by the edge API client.
HTTP_GET = "GET"
HTTP_POST = "POST"
HTTP_DELETE = "DELETE"
HTTP_PUT = "PUT"
URI_PREFIX = "/api/4.0/edges"

# FWaaS resource paths.
FIREWALL_SERVICE = "firewall/config"
FIREWALL_RULE_RESOURCE = "rules"

# LBaaS resource paths.
LOADBALANCER_SERVICE = "loadbalancer/config"
VIP_RESOURCE = "virtualservers"
POOL_RESOURCE = "pools"
MONITOR_RESOURCE = "monitors"
APP_PROFILE_RESOURCE = "applicationprofiles"

# IPsec VPNaaS resource path.
IPSEC_VPN_SERVICE = 'ipsec/config'
|
||||
|
||||
|
||||
class Vcns(object):
    """Thin REST client for the vShield/vCNS edge manager API.

    Each public method maps to a single HTTP call against the edge API.
    Responses are JSON-decoded unless the caller passes decode=False.
    """

    def __init__(self, address, user, password):
        self.address = address
        self.user = user
        self.password = password
        self.jsonapi_client = VcnsApiClient.VcnsApiHelper(address, user,
                                                          password, 'json')

    def do_request(self, method, uri, params=None, format='json', **kwargs):
        """Issue a request; return (header, content).

        Content is the decoded JSON body (a dict) unless it is empty
        (returns {}) or decode=False was passed (returns the raw body).
        """
        LOG.debug("VcnsApiHelper('%(method)s', '%(uri)s', '%(body)s')", {
            'method': method,
            'uri': uri,
            'body': jsonutils.dumps(params)})
        if format == 'json':
            header, content = self.jsonapi_client.request(method, uri, params)
        else:
            # NOTE(review): xmlapi_client is never initialized anywhere
            # in this class, so a non-json format raises AttributeError.
            # Confirm no caller relies on the xml path before removing.
            header, content = self.xmlapi_client.request(method, uri, params)
        LOG.debug("Header: '%s'", header)
        LOG.debug("Content: '%s'", content)
        if content == '':
            return header, {}
        if kwargs.get('decode', True):
            content = jsonutils.loads(content)
        return header, content

    #
    # Edge lifecycle
    #
    def deploy_edge(self, request):
        uri = URI_PREFIX + "?async=true"
        return self.do_request(HTTP_POST, uri, request, decode=False)

    def get_edge_id(self, job_id):
        uri = URI_PREFIX + "/jobs/%s" % job_id
        return self.do_request(HTTP_GET, uri, decode=True)

    def get_edge_deploy_status(self, edge_id):
        uri = URI_PREFIX + "/%s/status?getlatest=false" % edge_id
        # decode was previously the string "True"; the flag is boolean.
        return self.do_request(HTTP_GET, uri, decode=True)

    def delete_edge(self, edge_id):
        uri = "%s/%s" % (URI_PREFIX, edge_id)
        return self.do_request(HTTP_DELETE, uri)

    def update_interface(self, edge_id, vnic):
        uri = "%s/%s/vnics/%d" % (URI_PREFIX, edge_id, vnic['index'])
        return self.do_request(HTTP_PUT, uri, vnic, decode=True)

    #
    # NAT / routing / status
    #
    def get_nat_config(self, edge_id):
        uri = "%s/%s/nat/config" % (URI_PREFIX, edge_id)
        return self.do_request(HTTP_GET, uri, decode=True)

    def update_nat_config(self, edge_id, nat):
        uri = "%s/%s/nat/config" % (URI_PREFIX, edge_id)
        return self.do_request(HTTP_PUT, uri, nat, decode=True)

    def delete_nat_rule(self, edge_id, rule_id):
        uri = "%s/%s/nat/config/rules/%s" % (URI_PREFIX, edge_id, rule_id)
        return self.do_request(HTTP_DELETE, uri, decode=True)

    def get_edge_status(self, edge_id):
        uri = "%s/%s/status?getlatest=false" % (URI_PREFIX, edge_id)
        return self.do_request(HTTP_GET, uri, decode=True)

    def get_edges(self):
        uri = URI_PREFIX
        return self.do_request(HTTP_GET, uri, decode=True)

    def update_routes(self, edge_id, routes):
        uri = "%s/%s/routing/config/static" % (URI_PREFIX, edge_id)
        return self.do_request(HTTP_PUT, uri, routes)

    #
    # Logical switches (NSX ws.v1 API, not edge-scoped)
    #
    def create_lswitch(self, lsconfig):
        uri = "/api/ws.v1/lswitch"
        return self.do_request(HTTP_POST, uri, lsconfig, decode=True)

    def delete_lswitch(self, lswitch_id):
        uri = "/api/ws.v1/lswitch/%s" % lswitch_id
        return self.do_request(HTTP_DELETE, uri)

    #
    # Firewall (FWaaS) call helpers
    #
    def get_loadbalancer_config(self, edge_id):
        uri = self._build_uri_path(edge_id, LOADBALANCER_SERVICE)
        return self.do_request(HTTP_GET, uri, decode=True)

    def enable_service_loadbalancer(self, edge_id, config):
        uri = self._build_uri_path(edge_id, LOADBALANCER_SERVICE)
        return self.do_request(HTTP_PUT, uri, config)

    def update_firewall(self, edge_id, fw_req):
        uri = self._build_uri_path(
            edge_id, FIREWALL_SERVICE)
        return self.do_request(HTTP_PUT, uri, fw_req)

    def delete_firewall(self, edge_id):
        uri = self._build_uri_path(
            edge_id, FIREWALL_SERVICE, None)
        return self.do_request(HTTP_DELETE, uri)

    def update_firewall_rule(self, edge_id, vcns_rule_id, fwr_req):
        uri = self._build_uri_path(
            edge_id, FIREWALL_SERVICE,
            FIREWALL_RULE_RESOURCE,
            vcns_rule_id)
        return self.do_request(HTTP_PUT, uri, fwr_req)

    def delete_firewall_rule(self, edge_id, vcns_rule_id):
        uri = self._build_uri_path(
            edge_id, FIREWALL_SERVICE,
            FIREWALL_RULE_RESOURCE,
            vcns_rule_id)
        return self.do_request(HTTP_DELETE, uri)

    def add_firewall_rule_above(self, edge_id, ref_vcns_rule_id, fwr_req):
        uri = self._build_uri_path(
            edge_id, FIREWALL_SERVICE,
            FIREWALL_RULE_RESOURCE)
        uri += "?aboveRuleId=" + ref_vcns_rule_id
        return self.do_request(HTTP_POST, uri, fwr_req)

    def add_firewall_rule(self, edge_id, fwr_req):
        uri = self._build_uri_path(
            edge_id, FIREWALL_SERVICE,
            FIREWALL_RULE_RESOURCE)
        return self.do_request(HTTP_POST, uri, fwr_req)

    def get_firewall(self, edge_id):
        uri = self._build_uri_path(edge_id, FIREWALL_SERVICE)
        return self.do_request(HTTP_GET, uri, decode=True)

    def get_firewall_rule(self, edge_id, vcns_rule_id):
        uri = self._build_uri_path(
            edge_id, FIREWALL_SERVICE,
            FIREWALL_RULE_RESOURCE,
            vcns_rule_id)
        return self.do_request(HTTP_GET, uri, decode=True)

    #
    # Edge LBaaS call helpers
    #
    def create_vip(self, edge_id, vip_new):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            VIP_RESOURCE)
        return self.do_request(HTTP_POST, uri, vip_new)

    def get_vip(self, edge_id, vip_vseid):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            VIP_RESOURCE, vip_vseid)
        return self.do_request(HTTP_GET, uri, decode=True)

    def update_vip(self, edge_id, vip_vseid, vip_new):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            VIP_RESOURCE, vip_vseid)
        return self.do_request(HTTP_PUT, uri, vip_new)

    def delete_vip(self, edge_id, vip_vseid):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            VIP_RESOURCE, vip_vseid)
        return self.do_request(HTTP_DELETE, uri)

    def create_pool(self, edge_id, pool_new):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            POOL_RESOURCE)
        return self.do_request(HTTP_POST, uri, pool_new)

    def get_pool(self, edge_id, pool_vseid):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            POOL_RESOURCE, pool_vseid)
        return self.do_request(HTTP_GET, uri, decode=True)

    def update_pool(self, edge_id, pool_vseid, pool_new):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            POOL_RESOURCE, pool_vseid)
        return self.do_request(HTTP_PUT, uri, pool_new)

    def delete_pool(self, edge_id, pool_vseid):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            POOL_RESOURCE, pool_vseid)
        return self.do_request(HTTP_DELETE, uri)

    def create_health_monitor(self, edge_id, monitor_new):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            MONITOR_RESOURCE)
        return self.do_request(HTTP_POST, uri, monitor_new)

    def get_health_monitor(self, edge_id, monitor_vseid):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            MONITOR_RESOURCE, monitor_vseid)
        return self.do_request(HTTP_GET, uri, decode=True)

    def update_health_monitor(self, edge_id, monitor_vseid, monitor_new):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            MONITOR_RESOURCE,
            monitor_vseid)
        return self.do_request(HTTP_PUT, uri, monitor_new)

    def delete_health_monitor(self, edge_id, monitor_vseid):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            MONITOR_RESOURCE,
            monitor_vseid)
        return self.do_request(HTTP_DELETE, uri)

    def create_app_profile(self, edge_id, app_profile):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            APP_PROFILE_RESOURCE)
        return self.do_request(HTTP_POST, uri, app_profile)

    def update_app_profile(self, edge_id, app_profileid, app_profile):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            APP_PROFILE_RESOURCE, app_profileid)
        return self.do_request(HTTP_PUT, uri, app_profile)

    def delete_app_profile(self, edge_id, app_profileid):
        uri = self._build_uri_path(
            edge_id, LOADBALANCER_SERVICE,
            APP_PROFILE_RESOURCE,
            app_profileid)
        return self.do_request(HTTP_DELETE, uri)

    #
    # IPsec VPNaaS call helpers
    #
    def update_ipsec_config(self, edge_id, ipsec_config):
        uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE)
        return self.do_request(HTTP_PUT, uri, ipsec_config)

    def delete_ipsec_config(self, edge_id):
        uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE)
        return self.do_request(HTTP_DELETE, uri)

    def get_ipsec_config(self, edge_id):
        uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE)
        return self.do_request(HTTP_GET, uri)

    def _build_uri_path(self, edge_id,
                        service,
                        resource=None,
                        resource_id=None,
                        parent_resource_id=None,
                        fields=None,
                        relations=None,
                        filters=None,
                        types=None,
                        is_attachment=False):
        """Build an edge-scoped URI: <prefix>/<edge>/<service>[/<res>[/<id>]].

        NOTE: parent_resource_id, fields, relations, filters, types and
        is_attachment are accepted for signature compatibility but are
        currently unused.
        """
        uri_prefix = "%s/%s/%s" % (URI_PREFIX, edge_id, service)
        if resource:
            res_path = resource
            if resource_id:
                res_path += "/%s" % resource_id
            return "%s/%s" % (uri_prefix, res_path)
        return uri_prefix
|
@ -1,43 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.common import config # noqa
|
||||
from neutron.plugins.vmware.vshield import edge_appliance_driver
|
||||
from neutron.plugins.vmware.vshield.tasks import tasks
|
||||
from neutron.plugins.vmware.vshield import vcns
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class VcnsDriver(edge_appliance_driver.EdgeApplianceDriver):
    """Edge appliance driver bound to a vCNS backend via oslo config.

    Reads all connection/placement settings from the [vcns] config
    group, starts a TaskManager and opens a Vcns API client.
    """

    def __init__(self, callbacks):
        super(VcnsDriver, self).__init__()

        self.callbacks = callbacks
        vcns_conf = cfg.CONF.vcns
        self.vcns_uri = vcns_conf.manager_uri
        self.vcns_user = vcns_conf.user
        self.vcns_passwd = vcns_conf.password
        self.datacenter_moid = vcns_conf.datacenter_moid
        self.deployment_container_id = vcns_conf.deployment_container_id
        self.resource_pool_id = vcns_conf.resource_pool_id
        self.datastore_id = vcns_conf.datastore_id
        self.external_network = vcns_conf.external_network
        self.task_manager = tasks.TaskManager(
            vcns_conf.task_status_check_interval)
        self.task_manager.start()
        self.vcns = vcns.Vcns(self.vcns_uri, self.vcns_user, self.vcns_passwd)
|
@ -1,44 +0,0 @@
|
||||
# Copyright 2013 OpenStack Foundation.
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os
|
||||
|
||||
from neutron.plugins.vmware.api_client import client as nsx_client
|
||||
from neutron.plugins.vmware.api_client import eventlet_client
|
||||
from neutron.plugins.vmware import extensions
|
||||
import neutron.plugins.vmware.plugin as neutron_plugin
|
||||
from neutron.plugins.vmware.vshield import vcns
|
||||
|
||||
|
||||
# Shorthand aliases for the classes under test.
plugin = neutron_plugin.NsxPlugin
api_client = nsx_client.NsxApiClient
evt_client = eventlet_client.EventletApiClient
vcns_class = vcns.Vcns

# Filesystem locations used by the fake backend.
STUBS_PATH = os.path.join(os.path.dirname(__file__), 'etc')
NSXEXT_PATH = os.path.dirname(extensions.__file__)

# Fully qualified dotted names, typically used as mock.patch targets.
NSXAPI_NAME = '%s.%s' % (api_client.__module__, api_client.__name__)
PLUGIN_NAME = '%s.%s' % (plugin.__module__, plugin.__name__)
CLIENT_NAME = '%s.%s' % (evt_client.__module__, evt_client.__name__)
VCNS_NAME = '%s.%s' % (vcns_class.__module__, vcns_class.__name__)
|
||||
|
||||
|
||||
def get_fake_conf(filename):
    """Return the path of *filename* inside the fake-config (etc) dir."""
    return os.path.join(STUBS_PATH, filename)
|
||||
|
||||
|
||||
def nsx_method(method_name, module_name='nsxlib'):
    """Build the dotted path of a vmware-plugin attribute for patching."""
    return '.'.join(('neutron.plugins.vmware', module_name, method_name))
|
@ -1,661 +0,0 @@
|
||||
# Copyright 2012 VMware, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
import six.moves.urllib.parse as urlparse
|
||||
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.openstack.common import uuidutils
|
||||
from neutron.plugins.vmware.api_client import exception as api_exc
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
MAX_NAME_LEN = 40
|
||||
|
||||
|
||||
def _validate_name(name):
    """Raise if a display name exceeds the fake NSX maximum length."""
    if name and len(name) > MAX_NAME_LEN:
        # The original passed MAX_NAME_LEN as a second Exception arg,
        # so it was never interpolated into the message; use %-format.
        raise Exception("Logical switch name exceeds %d characters" %
                        MAX_NAME_LEN)
|
||||
|
||||
|
||||
def _validate_resource(body):
    """Run the common validations on a fake NSX resource payload."""
    _validate_name(body.get('display_name'))
|
||||
|
||||
|
||||
class FakeClient(object):
|
||||
|
||||
LSWITCH_RESOURCE = 'lswitch'
|
||||
LPORT_RESOURCE = 'lport'
|
||||
LROUTER_RESOURCE = 'lrouter'
|
||||
NAT_RESOURCE = 'nat'
|
||||
LQUEUE_RESOURCE = 'lqueue'
|
||||
SECPROF_RESOURCE = 'securityprofile'
|
||||
LSWITCH_STATUS = 'lswitchstatus'
|
||||
LROUTER_STATUS = 'lrouterstatus'
|
||||
LSWITCH_LPORT_RESOURCE = 'lswitch_lport'
|
||||
LROUTER_LPORT_RESOURCE = 'lrouter_lport'
|
||||
LROUTER_NAT_RESOURCE = 'lrouter_nat'
|
||||
LSWITCH_LPORT_STATUS = 'lswitch_lportstatus'
|
||||
LSWITCH_LPORT_ATT = 'lswitch_lportattachment'
|
||||
LROUTER_LPORT_STATUS = 'lrouter_lportstatus'
|
||||
LROUTER_LPORT_ATT = 'lrouter_lportattachment'
|
||||
GWSERVICE_RESOURCE = 'gatewayservice'
|
||||
|
||||
RESOURCES = [LSWITCH_RESOURCE, LROUTER_RESOURCE, LQUEUE_RESOURCE,
|
||||
LPORT_RESOURCE, NAT_RESOURCE, SECPROF_RESOURCE,
|
||||
GWSERVICE_RESOURCE]
|
||||
|
||||
FAKE_GET_RESPONSES = {
|
||||
LSWITCH_RESOURCE: "fake_get_lswitch.json",
|
||||
LSWITCH_LPORT_RESOURCE: "fake_get_lswitch_lport.json",
|
||||
LSWITCH_LPORT_STATUS: "fake_get_lswitch_lport_status.json",
|
||||
LSWITCH_LPORT_ATT: "fake_get_lswitch_lport_att.json",
|
||||
LROUTER_RESOURCE: "fake_get_lrouter.json",
|
||||
LROUTER_LPORT_RESOURCE: "fake_get_lrouter_lport.json",
|
||||
LROUTER_LPORT_STATUS: "fake_get_lrouter_lport_status.json",
|
||||
LROUTER_LPORT_ATT: "fake_get_lrouter_lport_att.json",
|
||||
LROUTER_STATUS: "fake_get_lrouter_status.json",
|
||||
LROUTER_NAT_RESOURCE: "fake_get_lrouter_nat.json",
|
||||
SECPROF_RESOURCE: "fake_get_security_profile.json",
|
||||
LQUEUE_RESOURCE: "fake_get_lqueue.json",
|
||||
GWSERVICE_RESOURCE: "fake_get_gwservice.json"
|
||||
}
|
||||
|
||||
FAKE_POST_RESPONSES = {
|
||||
LSWITCH_RESOURCE: "fake_post_lswitch.json",
|
||||
LROUTER_RESOURCE: "fake_post_lrouter.json",
|
||||
LSWITCH_LPORT_RESOURCE: "fake_post_lswitch_lport.json",
|
||||
LROUTER_LPORT_RESOURCE: "fake_post_lrouter_lport.json",
|
||||
LROUTER_NAT_RESOURCE: "fake_post_lrouter_nat.json",
|
||||
SECPROF_RESOURCE: "fake_post_security_profile.json",
|
||||
LQUEUE_RESOURCE: "fake_post_lqueue.json",
|
||||
GWSERVICE_RESOURCE: "fake_post_gwservice.json"
|
||||
}
|
||||
|
||||
FAKE_PUT_RESPONSES = {
|
||||
LSWITCH_RESOURCE: "fake_post_lswitch.json",
|
||||
LROUTER_RESOURCE: "fake_post_lrouter.json",
|
||||
LSWITCH_LPORT_RESOURCE: "fake_post_lswitch_lport.json",
|
||||
LROUTER_LPORT_RESOURCE: "fake_post_lrouter_lport.json",
|
||||
LROUTER_NAT_RESOURCE: "fake_post_lrouter_nat.json",
|
||||
LSWITCH_LPORT_ATT: "fake_put_lswitch_lport_att.json",
|
||||
LROUTER_LPORT_ATT: "fake_put_lrouter_lport_att.json",
|
||||
SECPROF_RESOURCE: "fake_post_security_profile.json",
|
||||
LQUEUE_RESOURCE: "fake_post_lqueue.json",
|
||||
GWSERVICE_RESOURCE: "fake_post_gwservice.json"
|
||||
}
|
||||
|
||||
MANAGED_RELATIONS = {
|
||||
LSWITCH_RESOURCE: [],
|
||||
LROUTER_RESOURCE: [],
|
||||
LSWITCH_LPORT_RESOURCE: ['LogicalPortAttachment'],
|
||||
LROUTER_LPORT_RESOURCE: ['LogicalPortAttachment'],
|
||||
}
|
||||
|
||||
_validators = {
|
||||
LSWITCH_RESOURCE: _validate_resource,
|
||||
LSWITCH_LPORT_RESOURCE: _validate_resource,
|
||||
LROUTER_LPORT_RESOURCE: _validate_resource,
|
||||
SECPROF_RESOURCE: _validate_resource,
|
||||
LQUEUE_RESOURCE: _validate_resource,
|
||||
GWSERVICE_RESOURCE: _validate_resource
|
||||
}
|
||||
|
||||
def __init__(self, fake_files_path):
|
||||
self.fake_files_path = fake_files_path
|
||||
self._fake_lswitch_dict = {}
|
||||
self._fake_lrouter_dict = {}
|
||||
self._fake_lswitch_lport_dict = {}
|
||||
self._fake_lrouter_lport_dict = {}
|
||||
self._fake_lrouter_nat_dict = {}
|
||||
self._fake_lswitch_lportstatus_dict = {}
|
||||
self._fake_lrouter_lportstatus_dict = {}
|
||||
self._fake_securityprofile_dict = {}
|
||||
self._fake_lqueue_dict = {}
|
||||
self._fake_gatewayservice_dict = {}
|
||||
|
||||
def _get_tag(self, resource, scope):
|
||||
tags = [tag['tag'] for tag in resource['tags']
|
||||
if tag['scope'] == scope]
|
||||
return len(tags) > 0 and tags[0]
|
||||
|
||||
def _get_filters(self, querystring):
|
||||
if not querystring:
|
||||
return (None, None, None, None)
|
||||
params = urlparse.parse_qs(querystring)
|
||||
tag_filter = None
|
||||
attr_filter = None
|
||||
if 'tag' in params and 'tag_scope' in params:
|
||||
tag_filter = {'scope': params['tag_scope'][0],
|
||||
'tag': params['tag'][0]}
|
||||
elif 'uuid' in params:
|
||||
attr_filter = {'uuid': params['uuid'][0]}
|
||||
# Handle page length and page cursor parameter
|
||||
page_len = params.get('_page_length')
|
||||
page_cursor = params.get('_page_cursor')
|
||||
if page_len:
|
||||
page_len = int(page_len[0])
|
||||
else:
|
||||
# Explicitly set it to None (avoid 0 or empty list)
|
||||
page_len = None
|
||||
return (tag_filter, attr_filter, page_len, page_cursor)
|
||||
|
||||
def _add_lswitch(self, body):
|
||||
fake_lswitch = jsonutils.loads(body)
|
||||
fake_lswitch['uuid'] = uuidutils.generate_uuid()
|
||||
self._fake_lswitch_dict[fake_lswitch['uuid']] = fake_lswitch
|
||||
# put the tenant_id and the zone_uuid in the main dict
|
||||
# for simplyfying templating
|
||||
zone_uuid = fake_lswitch['transport_zones'][0]['zone_uuid']
|
||||
fake_lswitch['zone_uuid'] = zone_uuid
|
||||
fake_lswitch['tenant_id'] = self._get_tag(fake_lswitch, 'os_tid')
|
||||
fake_lswitch['lport_count'] = 0
|
||||
# set status value
|
||||
fake_lswitch['status'] = 'true'
|
||||
return fake_lswitch
|
||||
|
||||
def _build_lrouter(self, body, uuid=None):
|
||||
fake_lrouter = jsonutils.loads(body)
|
||||
if uuid:
|
||||
fake_lrouter['uuid'] = uuid
|
||||
fake_lrouter['tenant_id'] = self._get_tag(fake_lrouter, 'os_tid')
|
||||
default_nexthop = fake_lrouter['routing_config'].get(
|
||||
'default_route_next_hop')
|
||||
if default_nexthop:
|
||||
fake_lrouter['default_next_hop'] = default_nexthop.get(
|
||||
'gateway_ip_address', '0.0.0.0')
|
||||
else:
|
||||
fake_lrouter['default_next_hop'] = '0.0.0.0'
|
||||
# NOTE(salv-orlando): We won't make the Fake NSX API client
|
||||
# aware of NSX version. The long term plan is to replace it
|
||||
# with behavioral mocking of NSX API requests
|
||||
if 'distributed' not in fake_lrouter:
|
||||
fake_lrouter['distributed'] = False
|
||||
distributed_json = ('"distributed": %s,' %
|
||||
str(fake_lrouter['distributed']).lower())
|
||||
fake_lrouter['distributed_json'] = distributed_json
|
||||
return fake_lrouter
|
||||
|
||||
def _add_lrouter(self, body):
|
||||
fake_lrouter = self._build_lrouter(body,
|
||||
uuidutils.generate_uuid())
|
||||
self._fake_lrouter_dict[fake_lrouter['uuid']] = fake_lrouter
|
||||
fake_lrouter['lport_count'] = 0
|
||||
# set status value
|
||||
fake_lrouter['status'] = 'true'
|
||||
return fake_lrouter
|
||||
|
||||
def _add_lqueue(self, body):
|
||||
fake_lqueue = jsonutils.loads(body)
|
||||
fake_lqueue['uuid'] = uuidutils.generate_uuid()
|
||||
self._fake_lqueue_dict[fake_lqueue['uuid']] = fake_lqueue
|
||||
return fake_lqueue
|
||||
|
||||
def _add_lswitch_lport(self, body, ls_uuid):
|
||||
fake_lport = jsonutils.loads(body)
|
||||
new_uuid = uuidutils.generate_uuid()
|
||||
fake_lport['uuid'] = new_uuid
|
||||
# put the tenant_id and the ls_uuid in the main dict
|
||||
# for simplyfying templating
|
||||
fake_lport['ls_uuid'] = ls_uuid
|
||||
fake_lport['tenant_id'] = self._get_tag(fake_lport, 'os_tid')
|
||||
fake_lport['neutron_port_id'] = self._get_tag(fake_lport,
|
||||
'q_port_id')
|
||||
fake_lport['neutron_device_id'] = self._get_tag(fake_lport, 'vm_id')
|
||||
fake_lport['att_type'] = "NoAttachment"
|
||||
fake_lport['att_info_json'] = ''
|
||||
self._fake_lswitch_lport_dict[fake_lport['uuid']] = fake_lport
|
||||
|
||||
fake_lswitch = self._fake_lswitch_dict[ls_uuid]
|
||||
fake_lswitch['lport_count'] += 1
|
||||
fake_lport_status = fake_lport.copy()
|
||||
fake_lport_status['ls_tenant_id'] = fake_lswitch['tenant_id']
|
||||
fake_lport_status['ls_uuid'] = fake_lswitch['uuid']
|
||||
fake_lport_status['ls_name'] = fake_lswitch['display_name']
|
||||
fake_lport_status['ls_zone_uuid'] = fake_lswitch['zone_uuid']
|
||||
# set status value
|
||||
fake_lport['status'] = 'true'
|
||||
self._fake_lswitch_lportstatus_dict[new_uuid] = fake_lport_status
|
||||
return fake_lport
|
||||
|
||||
def _build_lrouter_lport(self, body, new_uuid=None, lr_uuid=None):
|
||||
fake_lport = jsonutils.loads(body)
|
||||
if new_uuid:
|
||||
fake_lport['uuid'] = new_uuid
|
||||
# put the tenant_id and the le_uuid in the main dict
|
||||
# for simplyfying templating
|
||||
if lr_uuid:
|
||||
fake_lport['lr_uuid'] = lr_uuid
|
||||
fake_lport['tenant_id'] = self._get_tag(fake_lport, 'os_tid')
|
||||
fake_lport['neutron_port_id'] = self._get_tag(fake_lport,
|
||||
'q_port_id')
|
||||
# replace ip_address with its json dump
|
||||
if 'ip_addresses' in fake_lport:
|
||||
ip_addresses_json = jsonutils.dumps(fake_lport['ip_addresses'])
|
||||
fake_lport['ip_addresses_json'] = ip_addresses_json
|
||||
return fake_lport
|
||||
|
||||
def _add_lrouter_lport(self, body, lr_uuid):
|
||||
new_uuid = uuidutils.generate_uuid()
|
||||
fake_lport = self._build_lrouter_lport(body, new_uuid, lr_uuid)
|
||||
self._fake_lrouter_lport_dict[fake_lport['uuid']] = fake_lport
|
||||
try:
|
||||
fake_lrouter = self._fake_lrouter_dict[lr_uuid]
|
||||
except KeyError:
|
||||
raise api_exc.ResourceNotFound()
|
||||
fake_lrouter['lport_count'] += 1
|
||||
fake_lport_status = fake_lport.copy()
|
||||
fake_lport_status['lr_tenant_id'] = fake_lrouter['tenant_id']
|
||||
fake_lport_status['lr_uuid'] = fake_lrouter['uuid']
|
||||
fake_lport_status['lr_name'] = fake_lrouter['display_name']
|
||||
self._fake_lrouter_lportstatus_dict[new_uuid] = fake_lport_status
|
||||
return fake_lport
|
||||
|
||||
def _add_securityprofile(self, body):
|
||||
fake_securityprofile = jsonutils.loads(body)
|
||||
fake_securityprofile['uuid'] = uuidutils.generate_uuid()
|
||||
fake_securityprofile['tenant_id'] = self._get_tag(
|
||||
fake_securityprofile, 'os_tid')
|
||||
|
||||
fake_securityprofile['nova_spid'] = self._get_tag(fake_securityprofile,
|
||||
'nova_spid')
|
||||
self._fake_securityprofile_dict[fake_securityprofile['uuid']] = (
|
||||
fake_securityprofile)
|
||||
return fake_securityprofile
|
||||
|
||||
def _add_lrouter_nat(self, body, lr_uuid):
|
||||
fake_nat = jsonutils.loads(body)
|
||||
new_uuid = uuidutils.generate_uuid()
|
||||
fake_nat['uuid'] = new_uuid
|
||||
fake_nat['lr_uuid'] = lr_uuid
|
||||
self._fake_lrouter_nat_dict[fake_nat['uuid']] = fake_nat
|
||||
if 'match' in fake_nat:
|
||||
match_json = jsonutils.dumps(fake_nat['match'])
|
||||
fake_nat['match_json'] = match_json
|
||||
return fake_nat
|
||||
|
||||
def _add_gatewayservice(self, body):
|
||||
fake_gwservice = jsonutils.loads(body)
|
||||
fake_gwservice['uuid'] = str(uuidutils.generate_uuid())
|
||||
fake_gwservice['tenant_id'] = self._get_tag(
|
||||
fake_gwservice, 'os_tid')
|
||||
# FIXME(salvatore-orlando): For simplicity we're managing only a
|
||||
# single device. Extend the fake client for supporting multiple devices
|
||||
first_gw = fake_gwservice['gateways'][0]
|
||||
fake_gwservice['transport_node_uuid'] = first_gw['transport_node_uuid']
|
||||
fake_gwservice['device_id'] = first_gw['device_id']
|
||||
self._fake_gatewayservice_dict[fake_gwservice['uuid']] = (
|
||||
fake_gwservice)
|
||||
return fake_gwservice
|
||||
|
||||
def _build_relation(self, src, dst, resource_type, relation):
|
||||
if relation not in self.MANAGED_RELATIONS[resource_type]:
|
||||
return # Relation is not desired in output
|
||||
if '_relations' not in src or not src['_relations'].get(relation):
|
||||
return # Item does not have relation
|
||||
relation_data = src['_relations'].get(relation)
|
||||
dst_relations = dst.get('_relations', {})
|
||||
dst_relations[relation] = relation_data
|
||||
dst['_relations'] = dst_relations
|
||||
|
||||
def _fill_attachment(self, att_data, ls_uuid=None,
|
||||
lr_uuid=None, lp_uuid=None):
|
||||
new_data = att_data.copy()
|
||||
for k in ('ls_uuid', 'lr_uuid', 'lp_uuid'):
|
||||
if locals().get(k):
|
||||
new_data[k] = locals()[k]
|
||||
|
||||
def populate_field(field_name):
|
||||
if field_name in att_data:
|
||||
new_data['%s_field' % field_name] = ('"%s" : "%s",'
|
||||
% (field_name,
|
||||
att_data[field_name]))
|
||||
del new_data[field_name]
|
||||
else:
|
||||
new_data['%s_field' % field_name] = ""
|
||||
|
||||
for field in ['vif_uuid', 'peer_port_href', 'vlan_id',
|
||||
'peer_port_uuid', 'l3_gateway_service_uuid']:
|
||||
populate_field(field)
|
||||
return new_data
|
||||
|
||||
def _get_resource_type(self, path):
|
||||
"""Get resource type.
|
||||
|
||||
Identifies resource type and relevant uuids in the uri
|
||||
|
||||
/ws.v1/lswitch/xxx
|
||||
/ws.v1/lswitch/xxx/status
|
||||
/ws.v1/lswitch/xxx/lport/yyy
|
||||
/ws.v1/lswitch/xxx/lport/yyy/status
|
||||
/ws.v1/lrouter/zzz
|
||||
/ws.v1/lrouter/zzz/status
|
||||
/ws.v1/lrouter/zzz/lport/www
|
||||
/ws.v1/lrouter/zzz/lport/www/status
|
||||
/ws.v1/lqueue/xxx
|
||||
"""
|
||||
# The first element will always be 'ws.v1' - so we just discard it
|
||||
uri_split = path.split('/')[1:]
|
||||
# parse uri_split backwards
|
||||
suffix = ""
|
||||
idx = len(uri_split) - 1
|
||||
if 'status' in uri_split[idx]:
|
||||
suffix = "status"
|
||||
idx = idx - 1
|
||||
elif 'attachment' in uri_split[idx]:
|
||||
suffix = "attachment"
|
||||
idx = idx - 1
|
||||
# then check if we have an uuid
|
||||
uuids = []
|
||||
if uri_split[idx].replace('-', '') not in self.RESOURCES:
|
||||
uuids.append(uri_split[idx])
|
||||
idx = idx - 1
|
||||
resource_type = "%s%s" % (uri_split[idx], suffix)
|
||||
if idx > 1:
|
||||
uuids.insert(0, uri_split[idx - 1])
|
||||
resource_type = "%s_%s" % (uri_split[idx - 2], resource_type)
|
||||
return (resource_type.replace('-', ''), uuids)
|
||||
|
||||
def _list(self, resource_type, response_file,
|
||||
parent_uuid=None, query=None, relations=None):
|
||||
(tag_filter, attr_filter,
|
||||
page_len, page_cursor) = self._get_filters(query)
|
||||
# result_count attribute in response should appear only when
|
||||
# page_cursor is not specified
|
||||
do_result_count = not page_cursor
|
||||
with open("%s/%s" % (self.fake_files_path, response_file)) as f:
|
||||
response_template = f.read()
|
||||
res_dict = getattr(self, '_fake_%s_dict' % resource_type)
|
||||
if parent_uuid == '*':
|
||||
parent_uuid = None
|
||||
# NSX raises ResourceNotFound if lswitch doesn't exist and is not *
|
||||
elif not res_dict and resource_type == self.LSWITCH_LPORT_RESOURCE:
|
||||
raise api_exc.ResourceNotFound()
|
||||
|
||||
def _attr_match(res_uuid):
|
||||
if not attr_filter:
|
||||
return True
|
||||
item = res_dict[res_uuid]
|
||||
for (attr, value) in attr_filter.iteritems():
|
||||
if item.get(attr) != value:
|
||||
return False
|
||||
return True
|
||||
|
||||
def _tag_match(res_uuid):
|
||||
if not tag_filter:
|
||||
return True
|
||||
return any([x['scope'] == tag_filter['scope'] and
|
||||
x['tag'] == tag_filter['tag']
|
||||
for x in res_dict[res_uuid]['tags']])
|
||||
|
||||
def _lswitch_match(res_uuid):
|
||||
# verify that the switch exist
|
||||
if parent_uuid and parent_uuid not in self._fake_lswitch_dict:
|
||||
raise Exception(_("lswitch:%s not found") % parent_uuid)
|
||||
if (not parent_uuid
|
||||
or res_dict[res_uuid].get('ls_uuid') == parent_uuid):
|
||||
return True
|
||||
return False
|
||||
|
||||
def _lrouter_match(res_uuid):
|
||||
# verify that the router exist
|
||||
if parent_uuid and parent_uuid not in self._fake_lrouter_dict:
|
||||
raise api_exc.ResourceNotFound()
|
||||
if (not parent_uuid or
|
||||
res_dict[res_uuid].get('lr_uuid') == parent_uuid):
|
||||
return True
|
||||
return False
|
||||
|
||||
def _cursor_match(res_uuid, page_cursor):
|
||||
if not page_cursor:
|
||||
return True
|
||||
if page_cursor == res_uuid:
|
||||
# always return True once page_cursor has been found
|
||||
page_cursor = None
|
||||
return True
|
||||
return False
|
||||
|
||||
def _build_item(resource):
|
||||
item = jsonutils.loads(response_template % resource)
|
||||
if relations:
|
||||
for relation in relations:
|
||||
self._build_relation(resource, item,
|
||||
resource_type, relation)
|
||||
return item
|
||||
|
||||
for item in res_dict.itervalues():
|
||||
if 'tags' in item:
|
||||
item['tags_json'] = jsonutils.dumps(item['tags'])
|
||||
if resource_type in (self.LSWITCH_LPORT_RESOURCE,
|
||||
self.LSWITCH_LPORT_ATT,
|
||||
self.LSWITCH_LPORT_STATUS):
|
||||
parent_func = _lswitch_match
|
||||
elif resource_type in (self.LROUTER_LPORT_RESOURCE,
|
||||
self.LROUTER_LPORT_ATT,
|
||||
self.LROUTER_NAT_RESOURCE,
|
||||
self.LROUTER_LPORT_STATUS):
|
||||
parent_func = _lrouter_match
|
||||
else:
|
||||
parent_func = lambda x: True
|
||||
|
||||
items = [_build_item(res_dict[res_uuid])
|
||||
for res_uuid in res_dict
|
||||
if (parent_func(res_uuid) and
|
||||
_tag_match(res_uuid) and
|
||||
_attr_match(res_uuid) and
|
||||
_cursor_match(res_uuid, page_cursor))]
|
||||
# Rather inefficient, but hey this is just a mock!
|
||||
next_cursor = None
|
||||
total_items = len(items)
|
||||
if page_len:
|
||||
try:
|
||||
next_cursor = items[page_len]['uuid']
|
||||
except IndexError:
|
||||
next_cursor = None
|
||||
items = items[:page_len]
|
||||
response_dict = {'results': items}
|
||||
if next_cursor:
|
||||
response_dict['page_cursor'] = next_cursor
|
||||
if do_result_count:
|
||||
response_dict['result_count'] = total_items
|
||||
return jsonutils.dumps(response_dict)
|
||||
|
||||
def _show(self, resource_type, response_file,
|
||||
uuid1, uuid2=None, relations=None):
|
||||
target_uuid = uuid2 or uuid1
|
||||
if resource_type.endswith('attachment'):
|
||||
resource_type = resource_type[:resource_type.index('attachment')]
|
||||
with open("%s/%s" % (self.fake_files_path, response_file)) as f:
|
||||
response_template = f.read()
|
||||
res_dict = getattr(self, '_fake_%s_dict' % resource_type)
|
||||
for item in res_dict.itervalues():
|
||||
if 'tags' in item:
|
||||
item['tags_json'] = jsonutils.dumps(item['tags'])
|
||||
|
||||
# replace sec prof rules with their json dump
|
||||
def jsonify_rules(rule_key):
|
||||
if rule_key in item:
|
||||
rules_json = jsonutils.dumps(item[rule_key])
|
||||
item['%s_json' % rule_key] = rules_json
|
||||
jsonify_rules('logical_port_egress_rules')
|
||||
jsonify_rules('logical_port_ingress_rules')
|
||||
|
||||
items = [jsonutils.loads(response_template % res_dict[res_uuid])
|
||||
for res_uuid in res_dict if res_uuid == target_uuid]
|
||||
if items:
|
||||
return jsonutils.dumps(items[0])
|
||||
raise api_exc.ResourceNotFound()
|
||||
|
||||
def handle_get(self, url):
|
||||
#TODO(salvatore-orlando): handle field selection
|
||||
parsedurl = urlparse.urlparse(url)
|
||||
(res_type, uuids) = self._get_resource_type(parsedurl.path)
|
||||
relations = urlparse.parse_qs(parsedurl.query).get('relations')
|
||||
response_file = self.FAKE_GET_RESPONSES.get(res_type)
|
||||
if not response_file:
|
||||
raise api_exc.NsxApiException()
|
||||
if 'lport' in res_type or 'nat' in res_type:
|
||||
if len(uuids) > 1:
|
||||
return self._show(res_type, response_file, uuids[0],
|
||||
uuids[1], relations=relations)
|
||||
else:
|
||||
return self._list(res_type, response_file, uuids[0],
|
||||
query=parsedurl.query, relations=relations)
|
||||
elif ('lswitch' in res_type or
|
||||
'lrouter' in res_type or
|
||||
self.SECPROF_RESOURCE in res_type or
|
||||
self.LQUEUE_RESOURCE in res_type or
|
||||
'gatewayservice' in res_type):
|
||||
LOG.debug("UUIDS:%s", uuids)
|
||||
if uuids:
|
||||
return self._show(res_type, response_file, uuids[0],
|
||||
relations=relations)
|
||||
else:
|
||||
return self._list(res_type, response_file,
|
||||
query=parsedurl.query,
|
||||
relations=relations)
|
||||
else:
|
||||
raise Exception("unknown resource:%s" % res_type)
|
||||
|
||||
def handle_post(self, url, body):
|
||||
parsedurl = urlparse.urlparse(url)
|
||||
(res_type, uuids) = self._get_resource_type(parsedurl.path)
|
||||
response_file = self.FAKE_POST_RESPONSES.get(res_type)
|
||||
if not response_file:
|
||||
raise Exception("resource not found")
|
||||
with open("%s/%s" % (self.fake_files_path, response_file)) as f:
|
||||
response_template = f.read()
|
||||
add_resource = getattr(self, '_add_%s' % res_type)
|
||||
body_json = jsonutils.loads(body)
|
||||
val_func = self._validators.get(res_type)
|
||||
if val_func:
|
||||
val_func(body_json)
|
||||
args = [body]
|
||||
if uuids:
|
||||
args.append(uuids[0])
|
||||
response = response_template % add_resource(*args)
|
||||
return response
|
||||
|
||||
def handle_put(self, url, body):
|
||||
parsedurl = urlparse.urlparse(url)
|
||||
(res_type, uuids) = self._get_resource_type(parsedurl.path)
|
||||
response_file = self.FAKE_PUT_RESPONSES.get(res_type)
|
||||
if not response_file:
|
||||
raise Exception("resource not found")
|
||||
with open("%s/%s" % (self.fake_files_path, response_file)) as f:
|
||||
response_template = f.read()
|
||||
# Manage attachment operations
|
||||
is_attachment = False
|
||||
if res_type.endswith('attachment'):
|
||||
is_attachment = True
|
||||
res_type = res_type[:res_type.index('attachment')]
|
||||
res_dict = getattr(self, '_fake_%s_dict' % res_type)
|
||||
body_json = jsonutils.loads(body)
|
||||
val_func = self._validators.get(res_type)
|
||||
if val_func:
|
||||
val_func(body_json)
|
||||
try:
|
||||
resource = res_dict[uuids[-1]]
|
||||
except KeyError:
|
||||
raise api_exc.ResourceNotFound()
|
||||
if not is_attachment:
|
||||
edit_resource = getattr(self, '_build_%s' % res_type, None)
|
||||
if edit_resource:
|
||||
body_json = edit_resource(body)
|
||||
resource.update(body_json)
|
||||
else:
|
||||
relations = resource.get("_relations", {})
|
||||
body_2 = jsonutils.loads(body)
|
||||
resource['att_type'] = body_2['type']
|
||||
relations['LogicalPortAttachment'] = body_2
|
||||
resource['_relations'] = relations
|
||||
if body_2['type'] == "PatchAttachment":
|
||||
# We need to do a trick here
|
||||
if self.LROUTER_RESOURCE in res_type:
|
||||
res_type_2 = res_type.replace(self.LROUTER_RESOURCE,
|
||||
self.LSWITCH_RESOURCE)
|
||||
elif self.LSWITCH_RESOURCE in res_type:
|
||||
res_type_2 = res_type.replace(self.LSWITCH_RESOURCE,
|
||||
self.LROUTER_RESOURCE)
|
||||
res_dict_2 = getattr(self, '_fake_%s_dict' % res_type_2)
|
||||
body_2['peer_port_uuid'] = uuids[-1]
|
||||
resource_2 = \
|
||||
res_dict_2[jsonutils.loads(body)['peer_port_uuid']]
|
||||
relations_2 = resource_2.get("_relations")
|
||||
if not relations_2:
|
||||
relations_2 = {}
|
||||
relations_2['LogicalPortAttachment'] = body_2
|
||||
resource_2['_relations'] = relations_2
|
||||
resource['peer_port_uuid'] = body_2['peer_port_uuid']
|
||||
resource['att_info_json'] = (
|
||||
"\"peer_port_uuid\": \"%s\"," %
|
||||
resource_2['uuid'])
|
||||
resource_2['att_info_json'] = (
|
||||
"\"peer_port_uuid\": \"%s\"," %
|
||||
body_2['peer_port_uuid'])
|
||||
elif body_2['type'] == "L3GatewayAttachment":
|
||||
resource['attachment_gwsvc_uuid'] = (
|
||||
body_2['l3_gateway_service_uuid'])
|
||||
resource['vlan_id'] = body_2.get('vlan_id')
|
||||
elif body_2['type'] == "L2GatewayAttachment":
|
||||
resource['attachment_gwsvc_uuid'] = (
|
||||
body_2['l2_gateway_service_uuid'])
|
||||
elif body_2['type'] == "VifAttachment":
|
||||
resource['vif_uuid'] = body_2['vif_uuid']
|
||||
resource['att_info_json'] = (
|
||||
"\"vif_uuid\": \"%s\"," % body_2['vif_uuid'])
|
||||
|
||||
if not is_attachment:
|
||||
response = response_template % resource
|
||||
else:
|
||||
if res_type == self.LROUTER_LPORT_RESOURCE:
|
||||
lr_uuid = uuids[0]
|
||||
ls_uuid = None
|
||||
elif res_type == self.LSWITCH_LPORT_RESOURCE:
|
||||
ls_uuid = uuids[0]
|
||||
lr_uuid = None
|
||||
lp_uuid = uuids[1]
|
||||
response = response_template % self._fill_attachment(
|
||||
jsonutils.loads(body), ls_uuid, lr_uuid, lp_uuid)
|
||||
return response
|
||||
|
||||
def handle_delete(self, url):
|
||||
parsedurl = urlparse.urlparse(url)
|
||||
(res_type, uuids) = self._get_resource_type(parsedurl.path)
|
||||
response_file = self.FAKE_PUT_RESPONSES.get(res_type)
|
||||
if not response_file:
|
||||
raise Exception("resource not found")
|
||||
res_dict = getattr(self, '_fake_%s_dict' % res_type)
|
||||
try:
|
||||
del res_dict[uuids[-1]]
|
||||
except KeyError:
|
||||
raise api_exc.ResourceNotFound()
|
||||
return ""
|
||||
|
||||
def fake_request(self, *args, **kwargs):
|
||||
method = args[0]
|
||||
handler = getattr(self, "handle_%s" % method.lower())
|
||||
return handler(*args[1:])
|
||||
|
||||
def reset_all(self):
|
||||
self._fake_lswitch_dict.clear()
|
||||
self._fake_lrouter_dict.clear()
|
||||
self._fake_lswitch_lport_dict.clear()
|
||||
self._fake_lrouter_lport_dict.clear()
|
||||
self._fake_lswitch_lportstatus_dict.clear()
|
||||
self._fake_lrouter_lportstatus_dict.clear()
|
||||
self._fake_lqueue_dict.clear()
|
||||
self._fake_securityprofile_dict.clear()
|
||||
self._fake_gatewayservice_dict.clear()
|
@ -1,35 +0,0 @@
|
||||
# Copyright 2011 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import httplib
|
||||
|
||||
from neutron.plugins.vmware import api_client
|
||||
from neutron.tests import base
|
||||
|
||||
|
||||
class ApiCommonTest(base.BaseTestCase):
|
||||
|
||||
def test_ctrl_conn_to_str(self):
|
||||
conn = httplib.HTTPSConnection('localhost', 4242, timeout=0)
|
||||
self.assertTrue(
|
||||
api_client.ctrl_conn_to_str(conn) == 'https://localhost:4242')
|
||||
|
||||
conn = httplib.HTTPConnection('localhost', 4242, timeout=0)
|
||||
self.assertTrue(
|
||||
api_client.ctrl_conn_to_str(conn) == 'http://localhost:4242')
|
||||
|
||||
self.assertRaises(TypeError, api_client.ctrl_conn_to_str,
|
||||
('not an httplib.HTTPSConnection'))
|
@ -1,333 +0,0 @@
|
||||
# Copyright (C) 2009-2012 VMware, Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import httplib
|
||||
import random
|
||||
|
||||
import eventlet
|
||||
from eventlet.green import urllib2
|
||||
import mock
|
||||
|
||||
from neutron.i18n import _LI
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.api_client import eventlet_client as client
|
||||
from neutron.plugins.vmware.api_client import eventlet_request as request
|
||||
from neutron.tests import base
|
||||
from neutron.tests.unit import vmware
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
REQUEST_TIMEOUT = 1
|
||||
|
||||
|
||||
def fetch(url):
|
||||
return urllib2.urlopen(url).read()
|
||||
|
||||
|
||||
class ApiRequestEventletTest(base.BaseTestCase):
|
||||
|
||||
def setUp(self):
|
||||
|
||||
super(ApiRequestEventletTest, self).setUp()
|
||||
self.client = client.EventletApiClient(
|
||||
[("127.0.0.1", 4401, True)], "admin", "admin")
|
||||
self.url = "/ws.v1/_debug"
|
||||
self.req = request.EventletApiRequest(self.client, self.url)
|
||||
|
||||
def tearDown(self):
|
||||
self.client = None
|
||||
self.req = None
|
||||
super(ApiRequestEventletTest, self).tearDown()
|
||||
|
||||
def test_construct_eventlet_api_request(self):
|
||||
e = request.EventletApiRequest(self.client, self.url)
|
||||
self.assertIsNotNone(e)
|
||||
|
||||
def test_apirequest_spawn(self):
|
||||
def x(id):
|
||||
eventlet.greenthread.sleep(random.random())
|
||||
LOG.info(_LI('spawned: %d'), id)
|
||||
|
||||
for i in range(10):
|
||||
request.EventletApiRequest._spawn(x, i)
|
||||
|
||||
def test_apirequest_start(self):
|
||||
for i in range(10):
|
||||
a = request.EventletApiRequest(
|
||||
self.client, self.url)
|
||||
a._handle_request = mock.Mock()
|
||||
a.start()
|
||||
eventlet.greenthread.sleep(0.1)
|
||||
LOG.info(_LI('_handle_request called: %s'),
|
||||
a._handle_request.called)
|
||||
request.EventletApiRequest.joinall()
|
||||
|
||||
def test_join_with_handle_request(self):
|
||||
self.req._handle_request = mock.Mock()
|
||||
self.req.start()
|
||||
self.req.join()
|
||||
self.assertTrue(self.req._handle_request.called)
|
||||
|
||||
def test_join_without_handle_request(self):
|
||||
self.req._handle_request = mock.Mock()
|
||||
self.req.join()
|
||||
self.assertFalse(self.req._handle_request.called)
|
||||
|
||||
def test_copy(self):
|
||||
req = self.req.copy()
|
||||
for att in [
|
||||
'_api_client', '_url', '_method', '_body', '_headers',
|
||||
'_http_timeout', '_request_timeout', '_retries',
|
||||
'_redirects', '_auto_login']:
|
||||
self.assertTrue(getattr(req, att) is getattr(self.req, att))
|
||||
|
||||
def test_request_error(self):
|
||||
self.assertIsNone(self.req.request_error)
|
||||
|
||||
def test_run_and_handle_request(self):
|
||||
self.req._request_timeout = None
|
||||
self.req._handle_request = mock.Mock()
|
||||
self.req.start()
|
||||
self.req.join()
|
||||
self.assertTrue(self.req._handle_request.called)
|
||||
|
||||
def test_run_and_timeout(self):
|
||||
def my_handle_request():
|
||||
LOG.info('my_handle_request() self: %s' % self.req)
|
||||
LOG.info('my_handle_request() dir(self): %s' % dir(self.req))
|
||||
eventlet.greenthread.sleep(REQUEST_TIMEOUT * 2)
|
||||
|
||||
with mock.patch.object(
|
||||
self.req,
|
||||
'_handle_request',
|
||||
new=my_handle_request
|
||||
):
|
||||
self.req._request_timeout = REQUEST_TIMEOUT
|
||||
self.req.start()
|
||||
self.assertIsNone(self.req.join())
|
||||
|
||||
def prep_issue_request(self):
|
||||
mysock = mock.Mock()
|
||||
mysock.gettimeout.return_value = 4242
|
||||
|
||||
myresponse = mock.Mock()
|
||||
myresponse.read.return_value = 'body'
|
||||
myresponse.getheaders.return_value = 'headers'
|
||||
myresponse.status = httplib.MOVED_PERMANENTLY
|
||||
|
||||
myconn = mock.Mock()
|
||||
myconn.request.return_value = None
|
||||
myconn.sock = mysock
|
||||
myconn.getresponse.return_value = myresponse
|
||||
myconn.__str__ = mock.Mock()
|
||||
myconn.__str__.return_value = 'myconn string'
|
||||
|
||||
req = self.req
|
||||
req._redirect_params = mock.Mock()
|
||||
req._redirect_params.return_value = (myconn, 'url')
|
||||
req._request_str = mock.Mock()
|
||||
req._request_str.return_value = 'http://cool/cool'
|
||||
|
||||
client = self.client
|
||||
client.need_login = False
|
||||
client._auto_login = False
|
||||
client._auth_cookie = False
|
||||
client.acquire_connection = mock.Mock()
|
||||
client.acquire_connection.return_value = myconn
|
||||
client.release_connection = mock.Mock()
|
||||
|
||||
return (mysock, myresponse, myconn)
|
||||
|
||||
def test_issue_request_trigger_exception(self):
|
||||
(mysock, myresponse, myconn) = self.prep_issue_request()
|
||||
self.client.acquire_connection.return_value = None
|
||||
|
||||
self.req._issue_request()
|
||||
self.assertIsInstance(self.req._request_error, Exception)
|
||||
self.assertTrue(self.client.acquire_connection.called)
|
||||
|
||||
def test_issue_request_handle_none_sock(self):
|
||||
(mysock, myresponse, myconn) = self.prep_issue_request()
|
||||
myconn.sock = None
|
||||
self.req.start()
|
||||
self.assertIsNone(self.req.join())
|
||||
self.assertTrue(self.client.acquire_connection.called)
|
||||
|
||||
def test_issue_request_exceed_maximum_retries(self):
|
||||
(mysock, myresponse, myconn) = self.prep_issue_request()
|
||||
self.req.start()
|
||||
self.assertIsNone(self.req.join())
|
||||
self.assertTrue(self.client.acquire_connection.called)
|
||||
|
||||
def test_issue_request_trigger_non_redirect(self):
|
||||
(mysock, myresponse, myconn) = self.prep_issue_request()
|
||||
myresponse.status = httplib.OK
|
||||
self.req.start()
|
||||
self.assertIsNone(self.req.join())
|
||||
self.assertTrue(self.client.acquire_connection.called)
|
||||
|
||||
def test_issue_request_trigger_internal_server_error(self):
|
||||
(mysock, myresponse, myconn) = self.prep_issue_request()
|
||||
self.req._redirect_params.return_value = (myconn, None)
|
||||
self.req.start()
|
||||
self.assertIsNone(self.req.join())
|
||||
self.assertTrue(self.client.acquire_connection.called)
|
||||
|
||||
def test_redirect_params_break_on_location(self):
|
||||
myconn = mock.Mock()
|
||||
(conn, retval) = self.req._redirect_params(
|
||||
myconn, [('location', None)])
|
||||
self.assertIsNone(retval)
|
||||
|
||||
def test_redirect_params_parse_a_url(self):
|
||||
myconn = mock.Mock()
|
||||
(conn, retval) = self.req._redirect_params(
|
||||
myconn, [('location', '/path/a/b/c')])
|
||||
self.assertIsNotNone(retval)
|
||||
|
||||
def test_redirect_params_invalid_redirect_location(self):
|
||||
myconn = mock.Mock()
|
||||
(conn, retval) = self.req._redirect_params(
|
||||
myconn, [('location', '+path/a/b/c')])
|
||||
self.assertIsNone(retval)
|
||||
|
||||
def test_redirect_params_invalid_scheme(self):
|
||||
myconn = mock.Mock()
|
||||
(conn, retval) = self.req._redirect_params(
|
||||
myconn, [('location', 'invalidscheme://hostname:1/path')])
|
||||
self.assertIsNone(retval)
|
||||
|
||||
def test_redirect_params_setup_https_with_cooki(self):
|
||||
with mock.patch(vmware.CLIENT_NAME) as mock_client:
|
||||
api_client = mock_client.return_value
|
||||
self.req._api_client = api_client
|
||||
myconn = mock.Mock()
|
||||
(conn, retval) = self.req._redirect_params(
|
||||
myconn, [('location', 'https://host:1/path')])
|
||||
|
||||
self.assertIsNotNone(retval)
|
||||
self.assertTrue(api_client.acquire_redirect_connection.called)
|
||||
|
||||
def test_redirect_params_setup_htttps_and_query(self):
|
||||
with mock.patch(vmware.CLIENT_NAME) as mock_client:
|
||||
api_client = mock_client.return_value
|
||||
self.req._api_client = api_client
|
||||
myconn = mock.Mock()
|
||||
(conn, retval) = self.req._redirect_params(myconn, [
|
||||
('location', 'https://host:1/path?q=1')])
|
||||
|
||||
self.assertIsNotNone(retval)
|
||||
self.assertTrue(api_client.acquire_redirect_connection.called)
|
||||
|
||||
def test_redirect_params_setup_https_connection_no_cookie(self):
|
||||
with mock.patch(vmware.CLIENT_NAME) as mock_client:
|
||||
api_client = mock_client.return_value
|
||||
self.req._api_client = api_client
|
||||
myconn = mock.Mock()
|
||||
(conn, retval) = self.req._redirect_params(myconn, [
|
||||
('location', 'https://host:1/path')])
|
||||
|
||||
self.assertIsNotNone(retval)
|
||||
self.assertTrue(api_client.acquire_redirect_connection.called)
|
||||
|
||||
def test_redirect_params_setup_https_and_query_no_cookie(self):
|
||||
with mock.patch(vmware.CLIENT_NAME) as mock_client:
|
||||
api_client = mock_client.return_value
|
||||
self.req._api_client = api_client
|
||||
myconn = mock.Mock()
|
||||
(conn, retval) = self.req._redirect_params(
|
||||
myconn, [('location', 'https://host:1/path?q=1')])
|
||||
self.assertIsNotNone(retval)
|
||||
self.assertTrue(api_client.acquire_redirect_connection.called)
|
||||
|
||||
def test_redirect_params_path_only_with_query(self):
|
||||
with mock.patch(vmware.CLIENT_NAME) as mock_client:
|
||||
api_client = mock_client.return_value
|
||||
api_client.wait_for_login.return_value = None
|
||||
api_client.auth_cookie = None
|
||||
api_client.acquire_connection.return_value = True
|
||||
myconn = mock.Mock()
|
||||
(conn, retval) = self.req._redirect_params(myconn, [
|
||||
('location', '/path?q=1')])
|
||||
self.assertIsNotNone(retval)
|
||||
|
||||
def test_handle_request_auto_login(self):
|
||||
self.req._auto_login = True
|
||||
self.req._api_client = mock.Mock()
|
||||
self.req._api_client.need_login = True
|
||||
self.req._request_str = mock.Mock()
|
||||
self.req._request_str.return_value = 'http://cool/cool'
|
||||
self.req.spawn = mock.Mock()
|
||||
self.req._handle_request()
|
||||
|
||||
def test_handle_request_auto_login_unauth(self):
|
||||
self.req._auto_login = True
|
||||
self.req._api_client = mock.Mock()
|
||||
self.req._api_client.need_login = True
|
||||
self.req._request_str = mock.Mock()
|
||||
self.req._request_str.return_value = 'http://cool/cool'
|
||||
|
||||
import socket
|
||||
resp = httplib.HTTPResponse(socket.socket())
|
||||
resp.status = httplib.UNAUTHORIZED
|
||||
mywaiter = mock.Mock()
|
||||
mywaiter.wait = mock.Mock()
|
||||
mywaiter.wait.return_value = resp
|
||||
self.req.spawn = mock.Mock(return_value=mywaiter)
|
||||
self.req._handle_request()
|
||||
|
||||
def test_construct_eventlet_login_request(self):
|
||||
r = request.LoginRequestEventlet(self.client, 'user', 'password')
|
||||
self.assertIsNotNone(r)
|
||||
|
||||
def test_session_cookie_session_cookie_retrieval(self):
|
||||
r = request.LoginRequestEventlet(self.client, 'user', 'password')
|
||||
r.successful = mock.Mock()
|
||||
r.successful.return_value = True
|
||||
r.value = mock.Mock()
|
||||
r.value.get_header = mock.Mock()
|
||||
r.value.get_header.return_value = 'cool'
|
||||
self.assertIsNotNone(r.session_cookie())
|
||||
|
||||
def test_session_cookie_not_retrieved(self):
|
||||
r = request.LoginRequestEventlet(self.client, 'user', 'password')
|
||||
r.successful = mock.Mock()
|
||||
r.successful.return_value = False
|
||||
r.value = mock.Mock()
|
||||
r.value.get_header = mock.Mock()
|
||||
r.value.get_header.return_value = 'cool'
|
||||
self.assertIsNone(r.session_cookie())
|
||||
|
||||
def test_construct_eventlet_get_api_providers_request(self):
|
||||
r = request.GetApiProvidersRequestEventlet(self.client)
|
||||
self.assertIsNotNone(r)
|
||||
|
||||
def test_api_providers_none_api_providers(self):
|
||||
r = request.GetApiProvidersRequestEventlet(self.client)
|
||||
r.successful = mock.Mock(return_value=False)
|
||||
self.assertIsNone(r.api_providers())
|
||||
|
||||
def test_api_providers_non_none_api_providers(self):
|
||||
r = request.GetApiProvidersRequestEventlet(self.client)
|
||||
r.value = mock.Mock()
|
||||
r.value.body = """{
|
||||
"results": [
|
||||
{ "roles": [
|
||||
{ "role": "api_provider",
|
||||
"listen_addr": "pssl:1.1.1.1:1" }]}]}"""
|
||||
r.successful = mock.Mock(return_value=True)
|
||||
LOG.info('%s' % r.api_providers())
|
||||
self.assertIsNotNone(r.api_providers())
|
@ -1,103 +0,0 @@
|
||||
# Copyright 2014 VMware, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from sqlalchemy import orm
|
||||
|
||||
from neutron import context
|
||||
from neutron.plugins.vmware.common import exceptions as p_exc
|
||||
from neutron.plugins.vmware.dbexts import lsn_db
|
||||
from neutron.plugins.vmware.dbexts import nsx_models
|
||||
from neutron.tests.unit import testlib_api
|
||||
|
||||
|
||||
class LSNTestCase(testlib_api.SqlTestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(LSNTestCase, self).setUp()
|
||||
self.ctx = context.get_admin_context()
|
||||
self.net_id = 'foo_network_id'
|
||||
self.lsn_id = 'foo_lsn_id'
|
||||
self.lsn_port_id = 'foo_port_id'
|
||||
self.subnet_id = 'foo_subnet_id'
|
||||
self.mac_addr = 'aa:bb:cc:dd:ee:ff'
|
||||
|
||||
def test_lsn_add(self):
|
||||
lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id)
|
||||
lsn = (self.ctx.session.query(nsx_models.Lsn).
|
||||
filter_by(lsn_id=self.lsn_id).one())
|
||||
self.assertEqual(self.lsn_id, lsn.lsn_id)
|
||||
|
||||
def test_lsn_remove(self):
|
||||
lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id)
|
||||
lsn_db.lsn_remove(self.ctx, self.lsn_id)
|
||||
q = self.ctx.session.query(nsx_models.Lsn).filter_by(
|
||||
lsn_id=self.lsn_id)
|
||||
self.assertRaises(orm.exc.NoResultFound, q.one)
|
||||
|
||||
def test_lsn_remove_for_network(self):
|
||||
lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id)
|
||||
lsn_db.lsn_remove_for_network(self.ctx, self.net_id)
|
||||
q = self.ctx.session.query(nsx_models.Lsn).filter_by(
|
||||
lsn_id=self.lsn_id)
|
||||
self.assertRaises(orm.exc.NoResultFound, q.one)
|
||||
|
||||
def test_lsn_get_for_network(self):
|
||||
result = lsn_db.lsn_get_for_network(self.ctx, self.net_id,
|
||||
raise_on_err=False)
|
||||
self.assertIsNone(result)
|
||||
|
||||
def test_lsn_get_for_network_raise_not_found(self):
|
||||
self.assertRaises(p_exc.LsnNotFound,
|
||||
lsn_db.lsn_get_for_network,
|
||||
self.ctx, self.net_id)
|
||||
|
||||
def test_lsn_port_add(self):
|
||||
lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id)
|
||||
lsn_db.lsn_port_add_for_lsn(self.ctx, self.lsn_port_id,
|
||||
self.subnet_id, self.mac_addr, self.lsn_id)
|
||||
result = (self.ctx.session.query(nsx_models.LsnPort).
|
||||
filter_by(lsn_port_id=self.lsn_port_id).one())
|
||||
self.assertEqual(self.lsn_port_id, result.lsn_port_id)
|
||||
|
||||
def test_lsn_port_get_for_mac(self):
|
||||
lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id)
|
||||
lsn_db.lsn_port_add_for_lsn(self.ctx, self.lsn_port_id,
|
||||
self.subnet_id, self.mac_addr, self.lsn_id)
|
||||
result = lsn_db.lsn_port_get_for_mac(self.ctx, self.mac_addr)
|
||||
self.assertEqual(self.mac_addr, result.mac_addr)
|
||||
|
||||
def test_lsn_port_get_for_mac_raise_not_found(self):
|
||||
self.assertRaises(p_exc.LsnPortNotFound,
|
||||
lsn_db.lsn_port_get_for_mac,
|
||||
self.ctx, self.mac_addr)
|
||||
|
||||
def test_lsn_port_get_for_subnet(self):
|
||||
lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id)
|
||||
lsn_db.lsn_port_add_for_lsn(self.ctx, self.lsn_port_id,
|
||||
self.subnet_id, self.mac_addr, self.lsn_id)
|
||||
result = lsn_db.lsn_port_get_for_subnet(self.ctx, self.subnet_id)
|
||||
self.assertEqual(self.subnet_id, result.sub_id)
|
||||
|
||||
def test_lsn_port_get_for_subnet_raise_not_found(self):
|
||||
self.assertRaises(p_exc.LsnPortNotFound,
|
||||
lsn_db.lsn_port_get_for_subnet,
|
||||
self.ctx, self.mac_addr)
|
||||
|
||||
def test_lsn_port_remove(self):
|
||||
lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id)
|
||||
lsn_db.lsn_port_remove(self.ctx, self.lsn_port_id)
|
||||
q = (self.ctx.session.query(nsx_models.LsnPort).
|
||||
filter_by(lsn_port_id=self.lsn_port_id))
|
||||
self.assertRaises(orm.exc.NoResultFound, q.one)
|
@ -1,84 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from oslo_db import exception as d_exc
|
||||
|
||||
from neutron import context
|
||||
from neutron.db import models_v2
|
||||
from neutron.plugins.vmware.dbexts import db as nsx_db
|
||||
from neutron.plugins.vmware.dbexts import nsx_models
|
||||
from neutron.tests.unit import testlib_api
|
||||
|
||||
|
||||
class NsxDBTestCase(testlib_api.SqlTestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(NsxDBTestCase, self).setUp()
|
||||
self.ctx = context.get_admin_context()
|
||||
|
||||
def _setup_neutron_network_and_port(self, network_id, port_id):
|
||||
with self.ctx.session.begin(subtransactions=True):
|
||||
self.ctx.session.add(models_v2.Network(id=network_id))
|
||||
port = models_v2.Port(id=port_id,
|
||||
network_id=network_id,
|
||||
mac_address='foo_mac_address',
|
||||
admin_state_up=True,
|
||||
status='ACTIVE',
|
||||
device_id='',
|
||||
device_owner='')
|
||||
self.ctx.session.add(port)
|
||||
|
||||
def test_add_neutron_nsx_port_mapping_handle_duplicate_constraint(self):
|
||||
neutron_net_id = 'foo_neutron_network_id'
|
||||
neutron_port_id = 'foo_neutron_port_id'
|
||||
nsx_port_id = 'foo_nsx_port_id'
|
||||
nsx_switch_id = 'foo_nsx_switch_id'
|
||||
self._setup_neutron_network_and_port(neutron_net_id, neutron_port_id)
|
||||
|
||||
nsx_db.add_neutron_nsx_port_mapping(
|
||||
self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id)
|
||||
# Call the method twice to trigger a db duplicate constraint error
|
||||
nsx_db.add_neutron_nsx_port_mapping(
|
||||
self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id)
|
||||
result = (self.ctx.session.query(nsx_models.NeutronNsxPortMapping).
|
||||
filter_by(neutron_id=neutron_port_id).one())
|
||||
self.assertEqual(nsx_port_id, result.nsx_port_id)
|
||||
self.assertEqual(neutron_port_id, result.neutron_id)
|
||||
|
||||
def test_add_neutron_nsx_port_mapping_raise_on_duplicate_constraint(self):
|
||||
neutron_net_id = 'foo_neutron_network_id'
|
||||
neutron_port_id = 'foo_neutron_port_id'
|
||||
nsx_port_id_1 = 'foo_nsx_port_id_1'
|
||||
nsx_port_id_2 = 'foo_nsx_port_id_2'
|
||||
nsx_switch_id = 'foo_nsx_switch_id'
|
||||
self._setup_neutron_network_and_port(neutron_net_id, neutron_port_id)
|
||||
|
||||
nsx_db.add_neutron_nsx_port_mapping(
|
||||
self.ctx.session, neutron_port_id, nsx_switch_id, nsx_port_id_1)
|
||||
# Call the method twice to trigger a db duplicate constraint error,
|
||||
# this time with a different nsx port id!
|
||||
self.assertRaises(d_exc.DBDuplicateEntry,
|
||||
nsx_db.add_neutron_nsx_port_mapping,
|
||||
self.ctx.session, neutron_port_id,
|
||||
nsx_switch_id, nsx_port_id_2)
|
||||
|
||||
def test_add_neutron_nsx_port_mapping_raise_integrity_constraint(self):
|
||||
neutron_port_id = 'foo_neutron_port_id'
|
||||
nsx_port_id = 'foo_nsx_port_id'
|
||||
nsx_switch_id = 'foo_nsx_switch_id'
|
||||
self.assertRaises(d_exc.DBError,
|
||||
nsx_db.add_neutron_nsx_port_mapping,
|
||||
self.ctx.session, neutron_port_id,
|
||||
nsx_switch_id, nsx_port_id)
|
@ -1,15 +0,0 @@
|
||||
{
|
||||
"display_name": "%(display_name)s",
|
||||
"_href": "/ws.v1/gateway-service/%(uuid)s",
|
||||
"tags": %(tags_json)s,
|
||||
"_schema": "/ws.v1/schema/L2GatewayServiceConfig",
|
||||
"gateways": [
|
||||
{
|
||||
"transport_node_uuid": "%(transport_node_uuid)s",
|
||||
"type": "L2Gateway",
|
||||
"device_id": "%(device_id)s"
|
||||
}
|
||||
],
|
||||
"type": "L2GatewayServiceConfig",
|
||||
"uuid": "%(uuid)s"
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"display_name": "%(display_name)s",
|
||||
"uuid": "%(uuid)s",
|
||||
"type": "LogicalSwitchConfig",
|
||||
"_schema": "/ws.v1/schema/LogicalQueueConfig",
|
||||
"dscp": "%(dscp)s",
|
||||
"max_bandwidth_rate": "%(max_bandwidth_rate)s",
|
||||
"min_bandwidth_rate": "%(min_bandwidth_rate)s",
|
||||
"qos_marking": "%(qos_marking)s",
|
||||
"_href": "/ws.v1/lqueue/%(uuid)s"
|
||||
}
|
@ -1,29 +0,0 @@
|
||||
{
|
||||
"display_name": "%(display_name)s",
|
||||
%(distributed_json)s
|
||||
"uuid": "%(uuid)s",
|
||||
"tags": %(tags_json)s,
|
||||
"routing_config": {
|
||||
"type": "SingleDefaultRouteImplicitRoutingConfig",
|
||||
"_schema": "/ws.v1/schema/SingleDefaultRouteImplicitRoutingConfig",
|
||||
"default_route_next_hop": {
|
||||
"type": "RouterNextHop",
|
||||
"_schema": "/ws.v1/schema/RouterNextHop",
|
||||
"gateway_ip_address": "%(default_next_hop)s"
|
||||
}
|
||||
},
|
||||
"_schema": "/ws.v1/schema/LogicalRouterConfig",
|
||||
"_relations": {
|
||||
"LogicalRouterStatus": {
|
||||
"_href": "/ws.v1/lrouter/%(uuid)s/status",
|
||||
"lport_admin_up_count": %(lport_count)d,
|
||||
"_schema": "/ws.v1/schema/LogicalRouterStatus",
|
||||
"lport_count": %(lport_count)d,
|
||||
"fabric_status": %(status)s,
|
||||
"type": "LogicalRouterStatus",
|
||||
"lport_link_up_count": %(lport_count)d
|
||||
}
|
||||
},
|
||||
"type": "LogicalRouterConfig",
|
||||
"_href": "/ws.v1/lrouter/%(uuid)s"
|
||||
}
|
@ -1,12 +0,0 @@
|
||||
{
|
||||
"display_name": "%(display_name)s",
|
||||
"admin_status_enabled": "%(admin_status_enabled)s",
|
||||
"_href": "/ws.v1/lrouter/%(lr_uuid)s/lport/%(uuid)s",
|
||||
"tags":
|
||||
[{"scope": "q_port_id", "tag": "%(neutron_port_id)s"},
|
||||
{"scope": "os_tid", "tag": "%(tenant_id)s"}],
|
||||
"ip_addresses": %(ip_addresses_json)s,
|
||||
"_schema": "/ws.v1/schema/LogicalRouterPortConfig",
|
||||
"type": "LogicalRouterPortConfig",
|
||||
"uuid": "%(uuid)s"
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"LogicalPortAttachment":
|
||||
{
|
||||
%(peer_port_href_field)s
|
||||
%(peer_port_uuid_field)s
|
||||
%(l3_gateway_service_uuid_field)s
|
||||
%(vlan_id)s
|
||||
"type": "%(type)s",
|
||||
"schema": "/ws.v1/schema/%(type)s"
|
||||
}
|
||||
}
|
@ -1,6 +0,0 @@
|
||||
{
|
||||
"_href": "/ws.v1/lrouter/%(lr_uuid)s/nat/%(uuid)s",
|
||||
"type": "%(type)s",
|
||||
"match": %(match_json)s,
|
||||
"uuid": "%(uuid)s"
|
||||
}
|
@ -1,12 +0,0 @@
|
||||
{"display_name": "%(display_name)s",
|
||||
"_href": "/ws.v1/lswitch/%(uuid)s",
|
||||
"_schema": "/ws.v1/schema/LogicalSwitchConfig",
|
||||
"_relations": {"LogicalSwitchStatus":
|
||||
{"fabric_status": %(status)s,
|
||||
"type": "LogicalSwitchStatus",
|
||||
"lport_count": %(lport_count)d,
|
||||
"_href": "/ws.v1/lswitch/%(uuid)s/status",
|
||||
"_schema": "/ws.v1/schema/LogicalSwitchStatus"}},
|
||||
"type": "LogicalSwitchConfig",
|
||||
"tags": %(tags_json)s,
|
||||
"uuid": "%(uuid)s"}
|
@ -1,28 +0,0 @@
|
||||
{"display_name": "%(display_name)s",
|
||||
"_relations":
|
||||
{"LogicalPortStatus":
|
||||
{"type": "LogicalSwitchPortStatus",
|
||||
"admin_status_enabled": true,
|
||||
"fabric_status_up": %(status)s,
|
||||
"link_status_up": %(status)s,
|
||||
"_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s/status",
|
||||
"_schema": "/ws.v1/schema/LogicalSwitchPortStatus"},
|
||||
"LogicalSwitchConfig":
|
||||
{"uuid": "%(ls_uuid)s"},
|
||||
"LogicalPortAttachment":
|
||||
{
|
||||
"type": "%(att_type)s",
|
||||
%(att_info_json)s
|
||||
"schema": "/ws.v1/schema/%(att_type)s"
|
||||
}
|
||||
},
|
||||
"tags":
|
||||
[{"scope": "q_port_id", "tag": "%(neutron_port_id)s"},
|
||||
{"scope": "vm_id", "tag": "%(neutron_device_id)s"},
|
||||
{"scope": "os_tid", "tag": "%(tenant_id)s"}],
|
||||
"uuid": "%(uuid)s",
|
||||
"admin_status_enabled": "%(admin_status_enabled)s",
|
||||
"type": "LogicalSwitchPortConfig",
|
||||
"_schema": "/ws.v1/schema/LogicalSwitchPortConfig",
|
||||
"_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s"
|
||||
}
|
@ -1,7 +0,0 @@
|
||||
{
|
||||
"LogicalPortAttachment":
|
||||
{
|
||||
"type": "%(att_type)s",
|
||||
"schema": "/ws.v1/schema/%(att_type)s"
|
||||
}
|
||||
}
|
@ -1,23 +0,0 @@
|
||||
{"_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s",
|
||||
"lswitch":
|
||||
{"display_name": "%(ls_name)s",
|
||||
"uuid": "%(ls_uuid)s",
|
||||
"tags": [
|
||||
{"scope": "os_tid",
|
||||
"tag": "%(ls_tenant_id)s"}
|
||||
],
|
||||
"type": "LogicalSwitchConfig",
|
||||
"_schema": "/ws.v1/schema/LogicalSwitchConfig",
|
||||
"port_isolation_enabled": false,
|
||||
"transport_zones": [
|
||||
{"zone_uuid": "%(ls_zone_uuid)s",
|
||||
"transport_type": "stt"}
|
||||
],
|
||||
"_href": "/ws.v1/lswitch/%(ls_uuid)s"},
|
||||
"link_status_up": false,
|
||||
"_schema": "/ws.v1/schema/LogicalSwitchPortStatus",
|
||||
"admin_status_enabled": true,
|
||||
"fabric_status_up": true,
|
||||
"link_status_up": true,
|
||||
"type": "LogicalSwitchPortStatus"
|
||||
}
|
@ -1,10 +0,0 @@
|
||||
{
|
||||
"display_name": "%(display_name)s",
|
||||
"_href": "/ws.v1/security-profile/%(uuid)s",
|
||||
"tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"},
|
||||
{"scope": "nova_spid", "tag": "%(nova_spid)s"}],
|
||||
"logical_port_egress_rules": %(logical_port_egress_rules_json)s,
|
||||
"_schema": "/ws.v1/schema/SecurityProfileConfig",
|
||||
"logical_port_ingress_rules": %(logical_port_ingress_rules_json)s,
|
||||
"uuid": "%(uuid)s"
|
||||
}
|
@ -1,13 +0,0 @@
|
||||
{
|
||||
"display_name": "%(display_name)s",
|
||||
"tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"}],
|
||||
"gateways": [
|
||||
{
|
||||
"transport_node_uuid": "%(transport_node_uuid)s",
|
||||
"device_id": "%(device_id)s",
|
||||
"type": "L2Gateway"
|
||||
}
|
||||
],
|
||||
"type": "L2GatewayServiceConfig",
|
||||
"uuid": "%(uuid)s"
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"display_name": "%(display_name)s",
|
||||
"uuid": "%(uuid)s",
|
||||
"type": "LogicalSwitchConfig",
|
||||
"_schema": "/ws.v1/schema/LogicalQueueConfig",
|
||||
"dscp": "%(dscp)s",
|
||||
"max_bandwidth_rate": "%(max_bandwidth_rate)s",
|
||||
"min_bandwidth_rate": "%(min_bandwidth_rate)s",
|
||||
"qos_marking": "%(qos_marking)s",
|
||||
"_href": "/ws.v1/lqueue/%(uuid)s"
|
||||
}
|
@ -1,23 +0,0 @@
|
||||
{
|
||||
"display_name": "%(display_name)s",
|
||||
%(distributed_json)s
|
||||
"uuid": "%(uuid)s",
|
||||
"tags": [
|
||||
{
|
||||
"scope": "os_tid",
|
||||
"tag": "%(tenant_id)s"
|
||||
}
|
||||
],
|
||||
"routing_config": {
|
||||
"type": "SingleDefaultRouteImplicitRoutingConfig",
|
||||
"_schema": "/ws.v1/schema/SingleDefaultRouteImplicitRoutingConfig",
|
||||
"default_route_next_hop": {
|
||||
"type": "RouterNextHop",
|
||||
"_schema": "/ws.v1/schema/RouterNextHop",
|
||||
"gateway_ip_address": "%(default_next_hop)s"
|
||||
}
|
||||
},
|
||||
"_schema": "/ws.v1/schema/LogicalRouterConfig",
|
||||
"type": "LogicalRouterConfig",
|
||||
"_href": "/ws.v1/lrouter/%(uuid)s"
|
||||
}
|
@ -1,10 +0,0 @@
|
||||
{
|
||||
"display_name": "%(display_name)s",
|
||||
"_href": "/ws.v1/lrouter/%(lr_uuid)s/lport/%(uuid)s",
|
||||
"_schema": "/ws.v1/schema/LogicalRouterPortConfig",
|
||||
"mac_address": "00:00:00:00:00:00",
|
||||
"admin_status_enabled": true,
|
||||
"ip_addresses": %(ip_addresses_json)s,
|
||||
"type": "LogicalRouterPortConfig",
|
||||
"uuid": "%(uuid)s"
|
||||
}
|
@ -1,6 +0,0 @@
|
||||
{
|
||||
"_href": "/ws.v1/lrouter/%(lr_uuid)s/nat/%(uuid)s",
|
||||
"type": "%(type)s",
|
||||
"match": %(match_json)s,
|
||||
"uuid": "%(uuid)s"
|
||||
}
|
@ -1,12 +0,0 @@
|
||||
{
|
||||
"display_name": "%(display_name)s",
|
||||
"uuid": "%(uuid)s",
|
||||
"tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"}],
|
||||
"type": "LogicalSwitchConfig",
|
||||
"_schema": "/ws.v1/schema/LogicalSwitchConfig",
|
||||
"port_isolation_enabled": false,
|
||||
"transport_zones": [
|
||||
{"zone_uuid": "%(zone_uuid)s",
|
||||
"transport_type": "stt"}],
|
||||
"_href": "/ws.v1/lswitch/%(uuid)s"
|
||||
}
|
@ -1,17 +0,0 @@
|
||||
{
|
||||
"display_name": "%(uuid)s",
|
||||
"_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s",
|
||||
"security_profiles": [],
|
||||
"tags":
|
||||
[{"scope": "q_port_id", "tag": "%(neutron_port_id)s"},
|
||||
{"scope": "vm_id", "tag": "%(neutron_device_id)s"},
|
||||
{"scope": "os_tid", "tag": "%(tenant_id)s"}],
|
||||
"portno": 1,
|
||||
"queue_uuid": null,
|
||||
"_schema": "/ws.v1/schema/LogicalSwitchPortConfig",
|
||||
"mirror_targets": [],
|
||||
"allowed_address_pairs": [],
|
||||
"admin_status_enabled": true,
|
||||
"type": "LogicalSwitchPortConfig",
|
||||
"uuid": "%(uuid)s"
|
||||
}
|
@ -1,10 +0,0 @@
|
||||
{
|
||||
"display_name": "%(display_name)s",
|
||||
"_href": "/ws.v1/security-profile/%(uuid)s",
|
||||
"tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"},
|
||||
{"scope": "nova_spid", "tag": "%(nova_spid)s"}],
|
||||
"logical_port_egress_rules": [],
|
||||
"_schema": "/ws.v1/schema/SecurityProfileConfig",
|
||||
"logical_port_ingress_rules": [],
|
||||
"uuid": "%(uuid)s"
|
||||
}
|
@ -1,12 +0,0 @@
|
||||
{
|
||||
"LogicalPortAttachment":
|
||||
{
|
||||
%(peer_port_href_field)s
|
||||
%(peer_port_uuid_field)s
|
||||
%(l3_gateway_service_uuid_field)s
|
||||
%(vlan_id_field)s
|
||||
"_href": "/ws.v1/lrouter/%(lr_uuid)s/lport/%(lp_uuid)s/attachment",
|
||||
"type": "%(type)s",
|
||||
"schema": "/ws.v1/schema/%(type)s"
|
||||
}
|
||||
}
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"LogicalPortAttachment":
|
||||
{
|
||||
%(peer_port_href_field)s
|
||||
%(peer_port_uuid_field)s
|
||||
%(vif_uuid_field)s
|
||||
"_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(lp_uuid)s/attachment",
|
||||
"type": "%(type)s",
|
||||
"schema": "/ws.v1/schema/%(type)s"
|
||||
}
|
||||
}
|
@ -1,26 +0,0 @@
|
||||
[DEFAULT]
|
||||
# Show more verbose log output (sets INFO log level output)
|
||||
verbose = True
|
||||
|
||||
# Show debugging output in logs (sets DEBUG log level output)
|
||||
debug = False
|
||||
|
||||
# Address to bind the API server
|
||||
bind_host = 0.0.0.0
|
||||
|
||||
# Port the bind the API server to
|
||||
bind_port = 9696
|
||||
|
||||
# MISSING Path to the extensions
|
||||
# api_extensions_path =
|
||||
|
||||
# Paste configuration file
|
||||
api_paste_config = api-paste.ini.test
|
||||
|
||||
# The messaging module to use, defaults to kombu.
|
||||
rpc_backend = fake
|
||||
|
||||
lock_path = $state_path/lock
|
||||
|
||||
[database]
|
||||
connection = 'sqlite://'
|
@ -1,16 +0,0 @@
|
||||
[DEFAULT]
|
||||
default_tz_uuid = fake_tz_uuid
|
||||
nova_zone_id = whatever
|
||||
nsx_controllers = fake_1, fake_2
|
||||
nsx_user = foo
|
||||
nsx_password = bar
|
||||
default_l3_gw_service_uuid = whatever
|
||||
default_l2_gw_service_uuid = whatever
|
||||
default_service_cluster_uuid = whatever
|
||||
default_interface_name = whatever
|
||||
http_timeout = 13
|
||||
redirects = 12
|
||||
retries = 11
|
||||
|
||||
[NSX]
|
||||
agent_mode = agentless
|
@ -1,5 +0,0 @@
|
||||
[DEFAULT]
|
||||
default_tz_uuid = fake_tz_uuid
|
||||
nsx_controllers=fake_1,fake_2
|
||||
nsx_user=foo
|
||||
nsx_password=bar
|
@ -1,16 +0,0 @@
|
||||
[DEFAULT]
|
||||
default_tz_uuid = fake_tz_uuid
|
||||
nova_zone_id = whatever
|
||||
nsx_controllers = fake_1, fake_2
|
||||
nsx_user = foo
|
||||
nsx_password = bar
|
||||
default_l3_gw_service_uuid = whatever
|
||||
default_l2_gw_service_uuid = whatever
|
||||
default_service_cluster_uuid = whatever
|
||||
default_interface_name = whatever
|
||||
http_timeout = 13
|
||||
redirects = 12
|
||||
retries = 11
|
||||
|
||||
[NSX]
|
||||
agent_mode = combined
|
@ -1,12 +0,0 @@
|
||||
[DEFAULT]
|
||||
default_tz_uuid = fake_tz_uuid
|
||||
nova_zone_id = whatever
|
||||
nsx_controllers = fake_1, fake_2
|
||||
nsx_user = foo
|
||||
nsx_password = bar
|
||||
default_l3_gw_service_uuid = whatever
|
||||
default_l2_gw_service_uuid = whatever
|
||||
default_interface_name = whatever
|
||||
http_timeout = 13
|
||||
redirects = 12
|
||||
retries = 11
|
@ -1,7 +0,0 @@
|
||||
[DEFAULT]
|
||||
default_tz_uuid = fake_tz_uuid
|
||||
nsx_controllers=fake_1, fake_2
|
||||
nsx_user=foo
|
||||
nsx_password=bar
|
||||
default_l3_gw_service_uuid = whatever
|
||||
default_l2_gw_service_uuid = whatever
|
@ -1,12 +0,0 @@
|
||||
[DEFAULT]
|
||||
default_tz_uuid = fake_tz_uuid
|
||||
nova_zone_id = whatever
|
||||
nvp_controllers = fake_1, fake_2
|
||||
nvp_user = foo
|
||||
nvp_password = bar
|
||||
default_l3_gw_service_uuid = whatever
|
||||
default_l2_gw_service_uuid = whatever
|
||||
default_interface_name = whatever
|
||||
http_timeout = 3
|
||||
redirects = 2
|
||||
retries = 2
|
@ -1,9 +0,0 @@
|
||||
[vcns]
|
||||
manager_uri = https://fake-host
|
||||
user = fake-user
|
||||
passwordd = fake-password
|
||||
datacenter_moid = fake-moid
|
||||
resource_pool_id = fake-resgroup
|
||||
datastore_id = fake-datastore
|
||||
external_network = fake-ext-net
|
||||
task_status_check_interval = 100
|
@ -1,31 +0,0 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.extensions import allowedaddresspairs as addr_pair
|
||||
from neutron.tests.unit import test_extension_allowedaddresspairs as ext_pairs
|
||||
from neutron.tests.unit.vmware import test_nsx_plugin
|
||||
|
||||
|
||||
class TestAllowedAddressPairs(test_nsx_plugin.NsxPluginV2TestCase,
|
||||
ext_pairs.TestAllowedAddressPairs):
|
||||
|
||||
# TODO(arosen): move to ext_pairs.TestAllowedAddressPairs once all
|
||||
# plugins do this correctly.
|
||||
def test_create_port_no_allowed_address_pairs(self):
|
||||
with self.network() as net:
|
||||
res = self._create_port(self.fmt, net['network']['id'])
|
||||
port = self.deserialize(self.fmt, res)
|
||||
self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS], [])
|
||||
self._delete('ports', port['port']['id'])
|
@ -1,139 +0,0 @@
|
||||
# Copyright (c) 2013 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import contextlib
|
||||
import mock
|
||||
|
||||
from oslo_config import cfg
|
||||
|
||||
from neutron.api.v2 import attributes
|
||||
from neutron.common import test_lib
|
||||
from neutron import context
|
||||
from neutron.extensions import agent
|
||||
from neutron.plugins.vmware.api_client import version
|
||||
from neutron.plugins.vmware.common import sync
|
||||
from neutron.tests.unit import test_db_plugin
|
||||
from neutron.tests.unit import vmware
|
||||
from neutron.tests.unit.vmware.apiclient import fake
|
||||
|
||||
|
||||
class MacLearningExtensionManager(object):
|
||||
|
||||
def get_resources(self):
|
||||
# Add the resources to the global attribute map
|
||||
# This is done here as the setup process won't
|
||||
# initialize the main API router which extends
|
||||
# the global attribute map
|
||||
attributes.RESOURCE_ATTRIBUTE_MAP.update(
|
||||
agent.RESOURCE_ATTRIBUTE_MAP)
|
||||
return agent.Agent.get_resources()
|
||||
|
||||
def get_actions(self):
|
||||
return []
|
||||
|
||||
def get_request_extensions(self):
|
||||
return []
|
||||
|
||||
|
||||
class MacLearningDBTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
|
||||
fmt = 'json'
|
||||
|
||||
def setUp(self):
|
||||
test_lib.test_config['config_files'] = [
|
||||
vmware.get_fake_conf('nsx.ini.full.test')]
|
||||
cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH)
|
||||
# Save the original RESOURCE_ATTRIBUTE_MAP
|
||||
self.saved_attr_map = {}
|
||||
for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
|
||||
self.saved_attr_map[resource] = attrs.copy()
|
||||
ext_mgr = MacLearningExtensionManager()
|
||||
# mock api client
|
||||
self.fc = fake.FakeClient(vmware.STUBS_PATH)
|
||||
self.mock_nsx = mock.patch(vmware.NSXAPI_NAME, autospec=True)
|
||||
instance = self.mock_nsx.start()
|
||||
# Avoid runs of the synchronizer looping call
|
||||
patch_sync = mock.patch.object(sync, '_start_loopingcall')
|
||||
patch_sync.start()
|
||||
|
||||
# Emulate tests against NSX 2.x
|
||||
instance.return_value.get_version.return_value = version.Version("3.0")
|
||||
instance.return_value.request.side_effect = self.fc.fake_request
|
||||
cfg.CONF.set_override('metadata_mode', None, 'NSX')
|
||||
self.addCleanup(self.fc.reset_all)
|
||||
self.addCleanup(self.restore_resource_attribute_map)
|
||||
super(MacLearningDBTestCase, self).setUp(plugin=vmware.PLUGIN_NAME,
|
||||
ext_mgr=ext_mgr)
|
||||
self.adminContext = context.get_admin_context()
|
||||
|
||||
def restore_resource_attribute_map(self):
|
||||
# Restore the original RESOURCE_ATTRIBUTE_MAP
|
||||
attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
|
||||
|
||||
def test_create_with_mac_learning(self):
|
||||
with self.port(arg_list=('mac_learning_enabled',),
|
||||
mac_learning_enabled=True) as port:
|
||||
# Validate create operation response
|
||||
self.assertEqual(True, port['port']['mac_learning_enabled'])
|
||||
# Verify that db operation successfully set mac learning state
|
||||
req = self.new_show_request('ports', port['port']['id'], self.fmt)
|
||||
sport = self.deserialize(self.fmt, req.get_response(self.api))
|
||||
self.assertEqual(True, sport['port']['mac_learning_enabled'])
|
||||
|
||||
def test_create_and_show_port_without_mac_learning(self):
|
||||
with self.port() as port:
|
||||
req = self.new_show_request('ports', port['port']['id'], self.fmt)
|
||||
sport = self.deserialize(self.fmt, req.get_response(self.api))
|
||||
self.assertNotIn('mac_learning_enabled', sport['port'])
|
||||
|
||||
def test_update_port_with_mac_learning(self):
|
||||
with self.port(arg_list=('mac_learning_enabled',),
|
||||
mac_learning_enabled=False) as port:
|
||||
data = {'port': {'mac_learning_enabled': True}}
|
||||
req = self.new_update_request('ports', data, port['port']['id'])
|
||||
res = self.deserialize(self.fmt, req.get_response(self.api))
|
||||
self.assertEqual(True, res['port']['mac_learning_enabled'])
|
||||
|
||||
def test_update_preexisting_port_with_mac_learning(self):
|
||||
with self.port() as port:
|
||||
req = self.new_show_request('ports', port['port']['id'], self.fmt)
|
||||
sport = self.deserialize(self.fmt, req.get_response(self.api))
|
||||
self.assertNotIn('mac_learning_enabled', sport['port'])
|
||||
data = {'port': {'mac_learning_enabled': True}}
|
||||
req = self.new_update_request('ports', data, port['port']['id'])
|
||||
# Validate update operation response
|
||||
res = self.deserialize(self.fmt, req.get_response(self.api))
|
||||
self.assertEqual(True, res['port']['mac_learning_enabled'])
|
||||
# Verify that db operation successfully updated mac learning state
|
||||
req = self.new_show_request('ports', port['port']['id'], self.fmt)
|
||||
sport = self.deserialize(self.fmt, req.get_response(self.api))
|
||||
self.assertEqual(True, sport['port']['mac_learning_enabled'])
|
||||
|
||||
def test_list_ports(self):
|
||||
# for this test we need to enable overlapping ips
|
||||
cfg.CONF.set_default('allow_overlapping_ips', True)
|
||||
with contextlib.nested(self.port(arg_list=('mac_learning_enabled',),
|
||||
mac_learning_enabled=True),
|
||||
self.port(arg_list=('mac_learning_enabled',),
|
||||
mac_learning_enabled=True),
|
||||
self.port(arg_list=('mac_learning_enabled',),
|
||||
mac_learning_enabled=True)):
|
||||
for port in self._list('ports')['ports']:
|
||||
self.assertEqual(True, port['mac_learning_enabled'])
|
||||
|
||||
def test_show_port(self):
|
||||
with self.port(arg_list=('mac_learning_enabled',),
|
||||
mac_learning_enabled=True) as p:
|
||||
port_res = self._show('ports', p['port']['id'])['port']
|
||||
self.assertEqual(True, port_res['mac_learning_enabled'])
|
File diff suppressed because it is too large
Load Diff
@ -1,47 +0,0 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import mock
|
||||
|
||||
from neutron.common import test_lib
|
||||
from neutron.plugins.vmware.common import sync
|
||||
from neutron.tests.unit import test_extension_portsecurity as psec
|
||||
from neutron.tests.unit import vmware
|
||||
from neutron.tests.unit.vmware.apiclient import fake
|
||||
|
||||
|
||||
class PortSecurityTestCase(psec.PortSecurityDBTestCase):
|
||||
|
||||
def setUp(self):
|
||||
test_lib.test_config['config_files'] = [
|
||||
vmware.get_fake_conf('nsx.ini.test')]
|
||||
# mock api client
|
||||
self.fc = fake.FakeClient(vmware.STUBS_PATH)
|
||||
self.mock_nsx = mock.patch(vmware.NSXAPI_NAME, autospec=True)
|
||||
instance = self.mock_nsx.start()
|
||||
instance.return_value.login.return_value = "the_cookie"
|
||||
# Avoid runs of the synchronizer looping call
|
||||
patch_sync = mock.patch.object(sync, '_start_loopingcall')
|
||||
patch_sync.start()
|
||||
|
||||
instance.return_value.request.side_effect = self.fc.fake_request
|
||||
super(PortSecurityTestCase, self).setUp(vmware.PLUGIN_NAME)
|
||||
self.addCleanup(self.fc.reset_all)
|
||||
self.addCleanup(self.mock_nsx.stop)
|
||||
self.addCleanup(patch_sync.stop)
|
||||
|
||||
|
||||
class TestPortSecurity(PortSecurityTestCase, psec.TestPortSecurity):
|
||||
pass
|
@ -1,176 +0,0 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
import webob.exc
|
||||
|
||||
from neutron.extensions import multiprovidernet as mpnet
|
||||
from neutron.extensions import providernet as pnet
|
||||
from neutron.tests.unit import vmware
|
||||
from neutron.tests.unit.vmware import test_nsx_plugin
|
||||
|
||||
|
||||
class TestProvidernet(test_nsx_plugin.NsxPluginV2TestCase):
|
||||
|
||||
def test_create_delete_provider_network_default_physical_net(self):
|
||||
data = {'network': {'name': 'net1',
|
||||
'admin_state_up': True,
|
||||
'tenant_id': 'admin',
|
||||
pnet.NETWORK_TYPE: 'vlan',
|
||||
pnet.SEGMENTATION_ID: 411}}
|
||||
network_req = self.new_create_request('networks', data, self.fmt)
|
||||
net = self.deserialize(self.fmt, network_req.get_response(self.api))
|
||||
self.assertEqual(net['network'][pnet.NETWORK_TYPE], 'vlan')
|
||||
self.assertEqual(net['network'][pnet.SEGMENTATION_ID], 411)
|
||||
req = self.new_delete_request('networks', net['network']['id'])
|
||||
res = req.get_response(self.api)
|
||||
self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code)
|
||||
|
||||
def test_create_provider_network(self):
|
||||
data = {'network': {'name': 'net1',
|
||||
'admin_state_up': True,
|
||||
'tenant_id': 'admin',
|
||||
pnet.NETWORK_TYPE: 'vlan',
|
||||
pnet.SEGMENTATION_ID: 411,
|
||||
pnet.PHYSICAL_NETWORK: 'physnet1'}}
|
||||
network_req = self.new_create_request('networks', data, self.fmt)
|
||||
net = self.deserialize(self.fmt, network_req.get_response(self.api))
|
||||
self.assertEqual(net['network'][pnet.NETWORK_TYPE], 'vlan')
|
||||
self.assertEqual(net['network'][pnet.SEGMENTATION_ID], 411)
|
||||
self.assertEqual(net['network'][pnet.PHYSICAL_NETWORK], 'physnet1')
|
||||
|
||||
# Test that we can create another provider network using the same
|
||||
# vlan_id on another physical network.
|
||||
data['network'][pnet.PHYSICAL_NETWORK] = 'physnet2'
|
||||
network_req = self.new_create_request('networks', data, self.fmt)
|
||||
net = self.deserialize(self.fmt, network_req.get_response(self.api))
|
||||
self.assertEqual(net['network'][pnet.NETWORK_TYPE], 'vlan')
|
||||
self.assertEqual(net['network'][pnet.SEGMENTATION_ID], 411)
|
||||
self.assertEqual(net['network'][pnet.PHYSICAL_NETWORK], 'physnet2')
|
||||
|
||||
|
||||
class TestMultiProviderNetworks(test_nsx_plugin.NsxPluginV2TestCase):
|
||||
|
||||
def setUp(self, plugin=None):
|
||||
cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH)
|
||||
super(TestMultiProviderNetworks, self).setUp()
|
||||
|
||||
def test_create_network_provider(self):
|
||||
data = {'network': {'name': 'net1',
|
||||
pnet.NETWORK_TYPE: 'vlan',
|
||||
pnet.PHYSICAL_NETWORK: 'physnet1',
|
||||
pnet.SEGMENTATION_ID: 1,
|
||||
'tenant_id': 'tenant_one'}}
|
||||
network_req = self.new_create_request('networks', data)
|
||||
network = self.deserialize(self.fmt,
|
||||
network_req.get_response(self.api))
|
||||
self.assertEqual(network['network'][pnet.NETWORK_TYPE], 'vlan')
|
||||
self.assertEqual(network['network'][pnet.PHYSICAL_NETWORK], 'physnet1')
|
||||
self.assertEqual(network['network'][pnet.SEGMENTATION_ID], 1)
|
||||
self.assertNotIn(mpnet.SEGMENTS, network['network'])
|
||||
|
||||
def test_create_network_provider_flat(self):
|
||||
data = {'network': {'name': 'net1',
|
||||
pnet.NETWORK_TYPE: 'flat',
|
||||
pnet.PHYSICAL_NETWORK: 'physnet1',
|
||||
'tenant_id': 'tenant_one'}}
|
||||
network_req = self.new_create_request('networks', data)
|
||||
network = self.deserialize(self.fmt,
|
||||
network_req.get_response(self.api))
|
||||
self.assertEqual('flat', network['network'][pnet.NETWORK_TYPE])
|
||||
self.assertEqual('physnet1', network['network'][pnet.PHYSICAL_NETWORK])
|
||||
self.assertEqual(0, network['network'][pnet.SEGMENTATION_ID])
|
||||
self.assertNotIn(mpnet.SEGMENTS, network['network'])
|
||||
|
||||
def test_create_network_single_multiple_provider(self):
|
||||
data = {'network': {'name': 'net1',
|
||||
mpnet.SEGMENTS:
|
||||
[{pnet.NETWORK_TYPE: 'vlan',
|
||||
pnet.PHYSICAL_NETWORK: 'physnet1',
|
||||
pnet.SEGMENTATION_ID: 1}],
|
||||
'tenant_id': 'tenant_one'}}
|
||||
net_req = self.new_create_request('networks', data)
|
||||
network = self.deserialize(self.fmt, net_req.get_response(self.api))
|
||||
for provider_field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
|
||||
pnet.SEGMENTATION_ID]:
|
||||
self.assertNotIn(provider_field, network['network'])
|
||||
tz = network['network'][mpnet.SEGMENTS][0]
|
||||
self.assertEqual(tz[pnet.NETWORK_TYPE], 'vlan')
|
||||
self.assertEqual(tz[pnet.PHYSICAL_NETWORK], 'physnet1')
|
||||
self.assertEqual(tz[pnet.SEGMENTATION_ID], 1)
|
||||
|
||||
# Tests get_network()
|
||||
net_req = self.new_show_request('networks', network['network']['id'])
|
||||
network = self.deserialize(self.fmt, net_req.get_response(self.api))
|
||||
tz = network['network'][mpnet.SEGMENTS][0]
|
||||
self.assertEqual(tz[pnet.NETWORK_TYPE], 'vlan')
|
||||
self.assertEqual(tz[pnet.PHYSICAL_NETWORK], 'physnet1')
|
||||
self.assertEqual(tz[pnet.SEGMENTATION_ID], 1)
|
||||
|
||||
def test_create_network_multprovider(self):
|
||||
data = {'network': {'name': 'net1',
|
||||
mpnet.SEGMENTS:
|
||||
[{pnet.NETWORK_TYPE: 'vlan',
|
||||
pnet.PHYSICAL_NETWORK: 'physnet1',
|
||||
pnet.SEGMENTATION_ID: 1},
|
||||
{pnet.NETWORK_TYPE: 'stt',
|
||||
pnet.PHYSICAL_NETWORK: 'physnet1'}],
|
||||
'tenant_id': 'tenant_one'}}
|
||||
network_req = self.new_create_request('networks', data)
|
||||
network = self.deserialize(self.fmt,
|
||||
network_req.get_response(self.api))
|
||||
tz = network['network'][mpnet.SEGMENTS]
|
||||
for tz in data['network'][mpnet.SEGMENTS]:
|
||||
for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
|
||||
pnet.SEGMENTATION_ID]:
|
||||
self.assertEqual(tz.get(field), tz.get(field))
|
||||
|
||||
# Tests get_network()
|
||||
net_req = self.new_show_request('networks', network['network']['id'])
|
||||
network = self.deserialize(self.fmt, net_req.get_response(self.api))
|
||||
tz = network['network'][mpnet.SEGMENTS]
|
||||
for tz in data['network'][mpnet.SEGMENTS]:
|
||||
for field in [pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
|
||||
pnet.SEGMENTATION_ID]:
|
||||
self.assertEqual(tz.get(field), tz.get(field))
|
||||
|
||||
def test_create_network_with_provider_and_multiprovider_fail(self):
|
||||
data = {'network': {'name': 'net1',
|
||||
mpnet.SEGMENTS:
|
||||
[{pnet.NETWORK_TYPE: 'vlan',
|
||||
pnet.PHYSICAL_NETWORK: 'physnet1',
|
||||
pnet.SEGMENTATION_ID: 1}],
|
||||
pnet.NETWORK_TYPE: 'vlan',
|
||||
pnet.PHYSICAL_NETWORK: 'physnet1',
|
||||
pnet.SEGMENTATION_ID: 1,
|
||||
'tenant_id': 'tenant_one'}}
|
||||
|
||||
network_req = self.new_create_request('networks', data)
|
||||
res = network_req.get_response(self.api)
|
||||
self.assertEqual(res.status_int, 400)
|
||||
|
||||
def test_create_network_duplicate_segments(self):
|
||||
data = {'network': {'name': 'net1',
|
||||
mpnet.SEGMENTS:
|
||||
[{pnet.NETWORK_TYPE: 'vlan',
|
||||
pnet.PHYSICAL_NETWORK: 'physnet1',
|
||||
pnet.SEGMENTATION_ID: 1},
|
||||
{pnet.NETWORK_TYPE: 'vlan',
|
||||
pnet.PHYSICAL_NETWORK: 'physnet1',
|
||||
pnet.SEGMENTATION_ID: 1}],
|
||||
'tenant_id': 'tenant_one'}}
|
||||
network_req = self.new_create_request('networks', data)
|
||||
res = network_req.get_response(self.api)
|
||||
self.assertEqual(res.status_int, 400)
|
@ -1,276 +0,0 @@
|
||||
# Copyright (c) 2014 OpenStack Foundation.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import contextlib
|
||||
|
||||
import mock
|
||||
from oslo_config import cfg
|
||||
import webob.exc
|
||||
|
||||
from neutron import context
|
||||
from neutron.plugins.vmware.dbexts import qos_db
|
||||
from neutron.plugins.vmware.extensions import qos as ext_qos
|
||||
from neutron.plugins.vmware import nsxlib
|
||||
from neutron.tests.unit import test_extensions
|
||||
from neutron.tests.unit import vmware
|
||||
from neutron.tests.unit.vmware import test_nsx_plugin
|
||||
|
||||
|
||||
class QoSTestExtensionManager(object):
|
||||
|
||||
def get_resources(self):
|
||||
return ext_qos.Qos.get_resources()
|
||||
|
||||
def get_actions(self):
|
||||
return []
|
||||
|
||||
def get_request_extensions(self):
|
||||
return []
|
||||
|
||||
|
||||
class TestQoSQueue(test_nsx_plugin.NsxPluginV2TestCase):
|
||||
|
||||
def setUp(self, plugin=None):
|
||||
cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH)
|
||||
super(TestQoSQueue, self).setUp()
|
||||
ext_mgr = QoSTestExtensionManager()
|
||||
self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
|
||||
|
||||
def _create_qos_queue(self, fmt, body, **kwargs):
|
||||
qos_queue = self.new_create_request('qos-queues', body)
|
||||
if (kwargs.get('set_context') and 'tenant_id' in kwargs):
|
||||
# create a specific auth context for this request
|
||||
qos_queue.environ['neutron.context'] = context.Context(
|
||||
'', kwargs['tenant_id'])
|
||||
|
||||
return qos_queue.get_response(self.ext_api)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def qos_queue(self, name='foo', min='0', max='10',
|
||||
qos_marking=None, dscp='0', default=None):
|
||||
|
||||
body = {'qos_queue': {'tenant_id': 'tenant',
|
||||
'name': name,
|
||||
'min': min,
|
||||
'max': max}}
|
||||
|
||||
if qos_marking:
|
||||
body['qos_queue']['qos_marking'] = qos_marking
|
||||
if dscp:
|
||||
body['qos_queue']['dscp'] = dscp
|
||||
if default:
|
||||
body['qos_queue']['default'] = default
|
||||
res = self._create_qos_queue('json', body)
|
||||
qos_queue = self.deserialize('json', res)
|
||||
if res.status_int >= 400:
|
||||
raise webob.exc.HTTPClientError(code=res.status_int)
|
||||
|
||||
yield qos_queue
|
||||
|
||||
def test_create_qos_queue(self):
|
||||
with self.qos_queue(name='fake_lqueue', min=34, max=44,
|
||||
qos_marking='untrusted', default=False) as q:
|
||||
self.assertEqual(q['qos_queue']['name'], 'fake_lqueue')
|
||||
self.assertEqual(q['qos_queue']['min'], 34)
|
||||
self.assertEqual(q['qos_queue']['max'], 44)
|
||||
self.assertEqual(q['qos_queue']['qos_marking'], 'untrusted')
|
||||
self.assertFalse(q['qos_queue']['default'])
|
||||
|
||||
def test_create_trusted_qos_queue(self):
|
||||
with mock.patch.object(qos_db.LOG, 'info') as log:
|
||||
with mock.patch.object(nsxlib, 'do_request',
|
||||
return_value={"uuid": "fake_queue"}):
|
||||
with self.qos_queue(name='fake_lqueue', min=34, max=44,
|
||||
qos_marking='trusted', default=False) as q:
|
||||
self.assertIsNone(q['qos_queue']['dscp'])
|
||||
self.assertTrue(log.called)
|
||||
|
||||
def test_create_qos_queue_name_exceeds_40_chars(self):
|
||||
name = 'this_is_a_queue_whose_name_is_longer_than_40_chars'
|
||||
with self.qos_queue(name=name) as queue:
|
||||
# Assert Neutron name is not truncated
|
||||
self.assertEqual(queue['qos_queue']['name'], name)
|
||||
|
||||
def test_create_qos_queue_default(self):
|
||||
with self.qos_queue(default=True) as q:
|
||||
self.assertTrue(q['qos_queue']['default'])
|
||||
|
||||
def test_create_qos_queue_two_default_queues_fail(self):
|
||||
with self.qos_queue(default=True):
|
||||
body = {'qos_queue': {'tenant_id': 'tenant',
|
||||
'name': 'second_default_queue',
|
||||
'default': True}}
|
||||
res = self._create_qos_queue('json', body)
|
||||
self.assertEqual(res.status_int, 409)
|
||||
|
||||
def test_create_port_with_queue(self):
|
||||
with self.qos_queue(default=True) as q1:
|
||||
res = self._create_network('json', 'net1', True,
|
||||
arg_list=(ext_qos.QUEUE,),
|
||||
queue_id=q1['qos_queue']['id'])
|
||||
net1 = self.deserialize('json', res)
|
||||
self.assertEqual(net1['network'][ext_qos.QUEUE],
|
||||
q1['qos_queue']['id'])
|
||||
device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1"
|
||||
with self.port(device_id=device_id) as p:
|
||||
self.assertEqual(len(p['port'][ext_qos.QUEUE]), 36)
|
||||
|
||||
def test_create_shared_queue_networks(self):
|
||||
with self.qos_queue(default=True) as q1:
|
||||
res = self._create_network('json', 'net1', True,
|
||||
arg_list=(ext_qos.QUEUE,),
|
||||
queue_id=q1['qos_queue']['id'])
|
||||
net1 = self.deserialize('json', res)
|
||||
self.assertEqual(net1['network'][ext_qos.QUEUE],
|
||||
q1['qos_queue']['id'])
|
||||
res = self._create_network('json', 'net2', True,
|
||||
arg_list=(ext_qos.QUEUE,),
|
||||
queue_id=q1['qos_queue']['id'])
|
||||
net2 = self.deserialize('json', res)
|
||||
self.assertEqual(net1['network'][ext_qos.QUEUE],
|
||||
q1['qos_queue']['id'])
|
||||
device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1"
|
||||
res = self._create_port('json', net1['network']['id'],
|
||||
device_id=device_id)
|
||||
port1 = self.deserialize('json', res)
|
||||
res = self._create_port('json', net2['network']['id'],
|
||||
device_id=device_id)
|
||||
port2 = self.deserialize('json', res)
|
||||
self.assertEqual(port1['port'][ext_qos.QUEUE],
|
||||
port2['port'][ext_qos.QUEUE])
|
||||
|
||||
self._delete('ports', port1['port']['id'])
|
||||
self._delete('ports', port2['port']['id'])
|
||||
|
||||
def test_remove_queue_in_use_fail(self):
|
||||
with self.qos_queue() as q1:
|
||||
res = self._create_network('json', 'net1', True,
|
||||
arg_list=(ext_qos.QUEUE,),
|
||||
queue_id=q1['qos_queue']['id'])
|
||||
net1 = self.deserialize('json', res)
|
||||
device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1"
|
||||
res = self._create_port('json', net1['network']['id'],
|
||||
device_id=device_id)
|
||||
port = self.deserialize('json', res)
|
||||
self._delete('qos-queues', port['port'][ext_qos.QUEUE], 409)
|
||||
|
||||
def test_update_network_new_queue(self):
|
||||
with self.qos_queue() as q1:
|
||||
res = self._create_network('json', 'net1', True,
|
||||
arg_list=(ext_qos.QUEUE,),
|
||||
queue_id=q1['qos_queue']['id'])
|
||||
net1 = self.deserialize('json', res)
|
||||
with self.qos_queue() as new_q:
|
||||
data = {'network': {ext_qos.QUEUE: new_q['qos_queue']['id']}}
|
||||
req = self.new_update_request('networks', data,
|
||||
net1['network']['id'])
|
||||
res = req.get_response(self.api)
|
||||
net1 = self.deserialize('json', res)
|
||||
self.assertEqual(net1['network'][ext_qos.QUEUE],
|
||||
new_q['qos_queue']['id'])
|
||||
|
||||
def test_update_port_adding_device_id(self):
|
||||
with self.qos_queue() as q1:
|
||||
res = self._create_network('json', 'net1', True,
|
||||
arg_list=(ext_qos.QUEUE,),
|
||||
queue_id=q1['qos_queue']['id'])
|
||||
net1 = self.deserialize('json', res)
|
||||
device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1"
|
||||
res = self._create_port('json', net1['network']['id'])
|
||||
port = self.deserialize('json', res)
|
||||
self.assertIsNone(port['port'][ext_qos.QUEUE])
|
||||
|
||||
data = {'port': {'device_id': device_id}}
|
||||
req = self.new_update_request('ports', data,
|
||||
port['port']['id'])
|
||||
|
||||
res = req.get_response(self.api)
|
||||
port = self.deserialize('json', res)
|
||||
self.assertEqual(len(port['port'][ext_qos.QUEUE]), 36)
|
||||
|
||||
def test_get_port_with_qos_not_admin(self):
|
||||
body = {'qos_queue': {'tenant_id': 'not_admin',
|
||||
'name': 'foo', 'min': 20, 'max': 20}}
|
||||
res = self._create_qos_queue('json', body, tenant_id='not_admin')
|
||||
q1 = self.deserialize('json', res)
|
||||
res = self._create_network('json', 'net1', True,
|
||||
arg_list=(ext_qos.QUEUE, 'tenant_id',),
|
||||
queue_id=q1['qos_queue']['id'],
|
||||
tenant_id="not_admin")
|
||||
net1 = self.deserialize('json', res)
|
||||
self.assertEqual(len(net1['network'][ext_qos.QUEUE]), 36)
|
||||
res = self._create_port('json', net1['network']['id'],
|
||||
tenant_id='not_admin', set_context=True)
|
||||
|
||||
port = self.deserialize('json', res)
|
||||
self.assertNotIn(ext_qos.QUEUE, port['port'])
|
||||
|
||||
def test_dscp_value_out_of_range(self):
|
||||
body = {'qos_queue': {'tenant_id': 'admin', 'dscp': '64',
|
||||
'name': 'foo', 'min': 20, 'max': 20}}
|
||||
res = self._create_qos_queue('json', body)
|
||||
self.assertEqual(res.status_int, 400)
|
||||
|
||||
def test_dscp_value_with_qos_marking_trusted_returns_400(self):
|
||||
body = {'qos_queue': {'tenant_id': 'admin', 'dscp': '1',
|
||||
'qos_marking': 'trusted',
|
||||
'name': 'foo', 'min': 20, 'max': 20}}
|
||||
res = self._create_qos_queue('json', body)
|
||||
self.assertEqual(res.status_int, 400)
|
||||
|
||||
def test_non_admin_cannot_create_queue(self):
|
||||
body = {'qos_queue': {'tenant_id': 'not_admin',
|
||||
'name': 'foo', 'min': 20, 'max': 20}}
|
||||
res = self._create_qos_queue('json', body, tenant_id='not_admin',
|
||||
set_context=True)
|
||||
self.assertEqual(res.status_int, 403)
|
||||
|
||||
def test_update_port_non_admin_does_not_show_queue_id(self):
|
||||
body = {'qos_queue': {'tenant_id': 'not_admin',
|
||||
'name': 'foo', 'min': 20, 'max': 20}}
|
||||
res = self._create_qos_queue('json', body, tenant_id='not_admin')
|
||||
q1 = self.deserialize('json', res)
|
||||
res = self._create_network('json', 'net1', True,
|
||||
arg_list=(ext_qos.QUEUE,),
|
||||
tenant_id='not_admin',
|
||||
queue_id=q1['qos_queue']['id'])
|
||||
|
||||
net1 = self.deserialize('json', res)
|
||||
res = self._create_port('json', net1['network']['id'],
|
||||
tenant_id='not_admin', set_context=True)
|
||||
port = self.deserialize('json', res)
|
||||
device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1"
|
||||
data = {'port': {'device_id': device_id}}
|
||||
neutron_context = context.Context('', 'not_admin')
|
||||
port = self._update('ports', port['port']['id'], data,
|
||||
neutron_context=neutron_context)
|
||||
self.assertNotIn(ext_qos.QUEUE, port['port'])
|
||||
|
||||
def test_rxtx_factor(self):
|
||||
with self.qos_queue(max=10) as q1:
|
||||
|
||||
res = self._create_network('json', 'net1', True,
|
||||
arg_list=(ext_qos.QUEUE,),
|
||||
queue_id=q1['qos_queue']['id'])
|
||||
net1 = self.deserialize('json', res)
|
||||
res = self._create_port('json', net1['network']['id'],
|
||||
arg_list=(ext_qos.RXTX_FACTOR,),
|
||||
rxtx_factor=2, device_id='1')
|
||||
port = self.deserialize('json', res)
|
||||
req = self.new_show_request('qos-queues',
|
||||
port['port'][ext_qos.QUEUE])
|
||||
res = req.get_response(self.ext_api)
|
||||
queue = self.deserialize('json', res)
|
||||
self.assertEqual(queue['qos_queue']['max'], 20)
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user