nsxv3: mass refactor of nsxlib

This patch refactors the current codebase to be more modular
and testable. The main changes are:

  - pull all of the profile setup logic out of the __init__
    method into its own _init_nsx_profiles method to make the
    code easier to test (a minimal sketch of the pattern is
    shown after this list).

  - refactor the nsxlib.v3 code to break out all neutron-related
    exceptions and clean up the interface so that nsxlib.v3 can
    eventually become its own standalone library.
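
The sketch below illustrates the __init__ extraction pattern from the
first item above; the class, method, and attribute names are
illustrative placeholders, not the plugin's actual code:

    # Sketch only: __init__ wires dependencies, while the NSX
    # round-trips live in a method that unit tests can call (or mock)
    # directly instead of exercising the whole constructor.
    class NsxV3Plugin(object):

        def __init__(self, nsxlib):
            self.nsxlib = nsxlib
            self._switching_profiles = {}
            # profile setup is delegated instead of being inlined here
            self._init_nsx_profiles()

        def _init_nsx_profiles(self):
            # placeholder for the real profile lookup/creation logic;
            # tests can stub this method or exercise it in isolation
            self._switching_profiles['default'] = object()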

To improve:

  - Currently we have both nsxlib.v3.dfw_api and nsxlib.v3.firewall;
    we should refactor this code and merge them into one file.

  - refactor nsxlib so that each API component is sectioned off into
    its own subclass, for example nsxlib().port.create() rather than
    nsxlib().create_port(). I think this would be most useful for the
    security group/firewall integration, since there are many methods
    there that interface with NSX and the security group feature
    requires the most orchestration with NSX. Breaking them into
    subclasses will make things easier to understand (a rough sketch
    of the proposed shape follows this list).
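
To make the previous item concrete, here is a rough, hypothetical
sketch of the proposed per-resource layout; the PortApi name, its
create() signature, and the 'logical-ports' call are illustrative
assumptions and are not introduced by this patch:

    # Hypothetical sketch of the proposed layout; names and signatures
    # are illustrative only.
    class PortApi(object):

        def __init__(self, client):
            self.client = client

        def create(self, display_name, lswitch_id, tags=None):
            body = {'display_name': display_name,
                    'logical_switch_id': lswitch_id,
                    'tags': tags or []}
            return self.client.create('logical-ports', body)


    class NsxLib(object):

        def __init__(self, client):
            self.client = client
            # each API component hangs off its own attribute, so callers
            # write nsxlib.port.create(...) instead of nsxlib.create_port(...)
            self.port = PortApi(client)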

Change-Id: If2fe1e014b78703ff0a9cdff1e4e8d45f3a4a16d
Aaron Rosen 2016-08-02 14:00:17 -07:00 committed by garyk
parent 2c393582cd
commit 86118f6692
33 changed files with 1672 additions and 1443 deletions


@@ -83,11 +83,6 @@ class ServiceOverQuota(n_exc.Conflict):
message = _("Quota exceeded for NSX resource %(overs)s: %(err_msg)s")
class ServiceClusterUnavailable(NsxPluginException):
message = _("Service cluster: '%(cluster_id)s' is unavailable. Please, "
"check NSX setup and/or configuration")
class PortConfigurationError(NsxPluginException):
message = _("An error occurred while connecting LSN %(lsn_id)s "
"and network %(net_id)s via port %(port_id)s")
@@ -127,27 +122,6 @@ class NoRouterAvailable(n_exc.ResourceExhausted):
"No tenant router is available for allocation.")
class ManagerError(NsxPluginException):
message = _("Unexpected error from backend manager (%(manager)s) "
"for %(operation)s %(details)s")
def __init__(self, **kwargs):
kwargs['details'] = (': %s' % kwargs['details']
if 'details' in kwargs
else '')
super(ManagerError, self).__init__(**kwargs)
self.msg = self.message % kwargs
class ResourceNotFound(ManagerError):
message = _("Resource could not be found on backend (%(manager)s) for "
"%(operation)s")
class StaleRevision(ManagerError):
pass
class NsxL2GWConnectionMappingNotFound(n_exc.NotFound):
message = _('Unable to find mapping for L2 gateway connection: %(conn)s')
@@ -165,8 +139,7 @@ class InvalidIPAddress(n_exc.InvalidInput):
class SecurityGroupMaximumCapacityReached(NsxPluginException):
message = _("Security Group %(sg_id)s has reached its maximum capacity, "
"no more ports can be associated with this security-group.")
pass
class NsxResourceNotFound(n_exc.NotFound):


@@ -1,4 +1,4 @@
# Copyright 2015 OpenStack Foundation
# Copyright 2016 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -13,388 +13,408 @@
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from vmware_nsx._i18n import _, _LW
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.common import nsx_constants
from vmware_nsx.common import utils
from vmware_nsx.nsxlib.v3 import client
from vmware_nsx.nsxlib.v3 import cluster
from vmware_nsx.nsxlib.v3 import dfw_api
from vmware_nsx.nsxlib.v3 import exceptions
from vmware_nsx.nsxlib.v3 import security
LOG = log.getLogger(__name__)
def get_version():
node = client.get_resource("node")
version = node.get('node_version')
return version
# Max amount of time to try a request
DEFAULT_MAX_ATTEMPTS = 3
def get_edge_cluster(edge_cluster_uuid):
resource = "edge-clusters/%s" % edge_cluster_uuid
return client.get_resource(resource)
class NsxLib(dfw_api.DfwApi, security.Security):
MAX_ATTEMPTS = DEFAULT_MAX_ATTEMPTS
@utils.retry_upon_exception_nsxv3(nsx_exc.StaleRevision)
def update_resource_with_retry(resource, payload):
revised_payload = client.get_resource(resource)
for key_name in payload.keys():
revised_payload[key_name] = payload[key_name]
return client.update_resource(resource, revised_payload)
def __init__(self,
username=None,
password=None,
retries=None,
insecure=None,
ca_file=None,
concurrent_connections=None,
http_timeout=None,
http_read_timeout=None,
conn_idle_timeout=None,
http_provider=None,
max_attempts=DEFAULT_MAX_ATTEMPTS):
self.max_attempts = max_attempts
self.cluster = cluster.NSXClusteredAPI(
username=username, password=password,
retries=retries, insecure=insecure,
ca_file=ca_file,
concurrent_connections=concurrent_connections,
http_timeout=http_timeout,
http_read_timeout=http_read_timeout,
conn_idle_timeout=conn_idle_timeout,
http_provider=http_provider)
def delete_resource_by_values(resource, skip_not_found=True, **kwargs):
resources_get = client.get_resource(resource)
matched_num = 0
for res in resources_get['results']:
if utils.dict_match(kwargs, res):
LOG.debug("Deleting %s from resource %s", res, resource)
delete_resource = resource + "/" + str(res['id'])
client.delete_resource(delete_resource)
matched_num = matched_num + 1
if matched_num == 0:
if skip_not_found:
LOG.warning(_LW("No resource in %(res)s matched for values: "
"%(values)s"), {'res': resource,
self.client = client.NSX3Client(self.cluster)
super(NsxLib, self).__init__()
def get_version(self):
node = self.client.get("node")
version = node.get('node_version')
return version
def get_edge_cluster(self, edge_cluster_uuid):
resource = "edge-clusters/%s" % edge_cluster_uuid
return self.client.get(resource)
@utils.retry_upon_exception_nsxv3(exceptions.StaleRevision)
def update_resource_with_retry(self, resource, payload):
revised_payload = self.client.get(resource)
for key_name in payload.keys():
revised_payload[key_name] = payload[key_name]
return self.client.update(resource, revised_payload)
def delete_resource_by_values(self, resource,
skip_not_found=True, **kwargs):
resources_get = self.client.get(resource)
matched_num = 0
for res in resources_get['results']:
if utils.dict_match(kwargs, res):
LOG.debug("Deleting %s from resource %s", res, resource)
delete_resource = resource + "/" + str(res['id'])
self.client.delete(delete_resource)
matched_num = matched_num + 1
if matched_num == 0:
if skip_not_found:
LOG.warning(_LW("No resource in %(res)s matched for values: "
"%(values)s"), {'res': resource,
'values': kwargs})
else:
err_msg = (_("No resource in %(res)s matched for values: "
"%(values)s") % {'res': resource,
'values': kwargs})
raise exceptions.ResourceNotFound(
manager=client._get_nsx_managers_from_conf(),
operation=err_msg)
elif matched_num > 1:
LOG.warning(_LW("%(num)s resources in %(res)s matched for values: "
"%(values)s"), {'num': matched_num,
'res': resource,
'values': kwargs})
else:
err_msg = (_("No resource in %(res)s matched for values: "
"%(values)s") % {'res': resource,
'values': kwargs})
raise nsx_exc.ResourceNotFound(
manager=client._get_nsx_managers_from_conf(),
operation=err_msg)
elif matched_num > 1:
LOG.warning(_LW("%(num)s resources in %(res)s matched for values: "
"%(values)s"), {'num': matched_num,
'res': resource,
'values': kwargs})
def create_logical_switch(self, display_name, transport_zone_id, tags,
replication_mode=nsx_constants.MTEP,
admin_state=True, vlan_id=None):
# TODO(salv-orlando): Validate Replication mode and admin_state
# NOTE: These checks might be moved to the API client library if one
# that performs such checks in the client is available
def create_logical_switch(display_name, transport_zone_id, tags,
replication_mode=nsx_constants.MTEP,
admin_state=True, vlan_id=None):
# TODO(salv-orlando): Validate Replication mode and admin_state
# NOTE: These checks might be moved to the API client library if one that
# performs such checks in the client is available
resource = 'logical-switches'
body = {'transport_zone_id': transport_zone_id,
'replication_mode': replication_mode,
'display_name': display_name,
'tags': tags}
resource = 'logical-switches'
body = {'transport_zone_id': transport_zone_id,
'replication_mode': replication_mode,
'display_name': display_name,
'tags': tags}
if admin_state:
body['admin_state'] = nsx_constants.ADMIN_STATE_UP
else:
body['admin_state'] = nsx_constants.ADMIN_STATE_DOWN
if vlan_id:
body['vlan'] = vlan_id
return client.create_resource(resource, body)
@utils.retry_upon_exception_nsxv3(nsx_exc.StaleRevision,
max_attempts=cfg.CONF.nsx_v3.retries)
def delete_logical_switch(lswitch_id):
resource = 'logical-switches/%s?detach=true&cascade=true' % lswitch_id
client.delete_resource(resource)
def get_logical_switch(logical_switch_id):
resource = "logical-switches/%s" % logical_switch_id
return client.get_resource(resource)
@utils.retry_upon_exception_nsxv3(nsx_exc.StaleRevision,
max_attempts=cfg.CONF.nsx_v3.retries)
def update_logical_switch(lswitch_id, name=None, admin_state=None, tags=None):
resource = "logical-switches/%s" % lswitch_id
lswitch = get_logical_switch(lswitch_id)
if name is not None:
lswitch['display_name'] = name
if admin_state is not None:
if admin_state:
lswitch['admin_state'] = nsx_constants.ADMIN_STATE_UP
body['admin_state'] = nsx_constants.ADMIN_STATE_UP
else:
lswitch['admin_state'] = nsx_constants.ADMIN_STATE_DOWN
if tags is not None:
lswitch['tags'] = tags
return client.update_resource(resource, lswitch)
def add_nat_rule(logical_router_id, action, translated_network,
source_net=None, dest_net=None,
enabled=True, rule_priority=None):
resource = 'logical-routers/%s/nat/rules' % logical_router_id
body = {'action': action,
'enabled': enabled,
'translated_network': translated_network}
if source_net:
body['match_source_network'] = source_net
if dest_net:
body['match_destination_network'] = dest_net
if rule_priority:
body['rule_priority'] = rule_priority
return client.create_resource(resource, body)
def add_static_route(logical_router_id, dest_cidr, nexthop):
resource = 'logical-routers/%s/routing/static-routes' % logical_router_id
body = {}
if dest_cidr:
body['network'] = dest_cidr
if nexthop:
body['next_hops'] = [{"ip_address": nexthop}]
return client.create_resource(resource, body)
def delete_static_route(logical_router_id, static_route_id):
resource = 'logical-routers/%s/routing/static-routes/%s' % (
logical_router_id, static_route_id)
client.delete_resource(resource)
def delete_static_route_by_values(logical_router_id,
dest_cidr=None, nexthop=None):
resource = 'logical-routers/%s/routing/static-routes' % logical_router_id
kwargs = {}
if dest_cidr:
kwargs['network'] = dest_cidr
if nexthop:
kwargs['next_hops'] = [{"ip_address": nexthop}]
return delete_resource_by_values(resource, **kwargs)
def delete_nat_rule(logical_router_id, nat_rule_id):
resource = 'logical-routers/%s/nat/rules/%s' % (logical_router_id,
nat_rule_id)
client.delete_resource(resource)
def delete_nat_rule_by_values(logical_router_id, **kwargs):
resource = 'logical-routers/%s/nat/rules' % logical_router_id
return delete_resource_by_values(resource, **kwargs)
def update_logical_router_advertisement(logical_router_id, **kwargs):
resource = 'logical-routers/%s/routing/advertisement' % logical_router_id
return update_resource_with_retry(resource, kwargs)
def _build_qos_switching_profile_args(tags, name=None, description=None):
body = {"resource_type": "QosSwitchingProfile",
"tags": tags}
return _update_qos_switching_profile_args(
body, name=name, description=description)
def _update_qos_switching_profile_args(body, name=None, description=None):
if name:
body["display_name"] = name
if description:
body["description"] = description
return body
def _enable_shaping_in_args(body, burst_size=None, peak_bandwidth=None,
average_bandwidth=None):
for shaper in body["shaper_configuration"]:
# Neutron currently supports only shaping of Egress traffic
if shaper["resource_type"] == "EgressRateShaper":
shaper["enabled"] = True
if burst_size:
shaper["burst_size_bytes"] = burst_size
if peak_bandwidth:
shaper["peak_bandwidth_mbps"] = peak_bandwidth
if average_bandwidth:
shaper["average_bandwidth_mbps"] = average_bandwidth
break
return body
def _disable_shaping_in_args(body):
for shaper in body["shaper_configuration"]:
# Neutron currently supports only shaping of Egress traffic
if shaper["resource_type"] == "EgressRateShaper":
shaper["enabled"] = False
shaper["burst_size_bytes"] = 0
shaper["peak_bandwidth_mbps"] = 0
shaper["average_bandwidth_mbps"] = 0
break
return body
def _update_dscp_in_args(body, qos_marking, dscp):
body["dscp"] = {}
body["dscp"]["mode"] = qos_marking.upper()
if dscp:
body["dscp"]["priority"] = dscp
return body
def create_qos_switching_profile(tags, name=None,
description=None):
resource = 'switching-profiles'
body = _build_qos_switching_profile_args(tags, name, description)
return client.create_resource(resource, body)
def update_qos_switching_profile(profile_id, tags, name=None,
description=None):
resource = 'switching-profiles/%s' % profile_id
# get the current configuration
body = get_qos_switching_profile(profile_id)
# update the relevant fields
body = _update_qos_switching_profile_args(body, name, description)
return update_resource_with_retry(resource, body)
def update_qos_switching_profile_shaping(profile_id, shaping_enabled=False,
burst_size=None, peak_bandwidth=None,
average_bandwidth=None,
qos_marking=None, dscp=None):
resource = 'switching-profiles/%s' % profile_id
# get the current configuration
body = get_qos_switching_profile(profile_id)
# update the relevant fields
if shaping_enabled:
body = _enable_shaping_in_args(body,
burst_size=burst_size,
peak_bandwidth=peak_bandwidth,
average_bandwidth=average_bandwidth)
else:
body = _disable_shaping_in_args(body)
body = _update_dscp_in_args(body, qos_marking, dscp)
return update_resource_with_retry(resource, body)
def get_qos_switching_profile(profile_id):
resource = 'switching-profiles/%s' % profile_id
return client.get_resource(resource)
def delete_qos_switching_profile(profile_id):
resource = 'switching-profiles/%s' % profile_id
client.delete_resource(resource)
def create_bridge_endpoint(device_name, seg_id, tags):
"""Create a bridge endpoint on the backend.
Create a bridge endpoint resource on a bridge cluster for the L2 gateway
network connection.
:param device_name: device_name actually refers to the bridge cluster's
UUID.
:param seg_id: integer representing the VLAN segmentation ID.
:param tags: nsx backend specific tags.
"""
resource = 'bridge-endpoints'
body = {'bridge_cluster_id': device_name,
'tags': tags,
'vlan': seg_id}
return client.create_resource(resource, body)
def delete_bridge_endpoint(bridge_endpoint_id):
"""Delete a bridge endpoint on the backend.
:param bridge_endpoint_id: string representing the UUID of the bridge
endpoint to be deleted.
"""
resource = 'bridge-endpoints/%s' % bridge_endpoint_id
client.delete_resource(resource)
def _get_resource_by_name_or_id(name_or_id, resource):
all_results = client.get_resource(resource)['results']
matched_results = []
for rs in all_results:
if rs.get('id') == name_or_id:
# Matched by id - must be unique
return name_or_id
if rs.get('display_name') == name_or_id:
# Matched by name - add to the list to verify it is unique
matched_results.append(rs)
if len(matched_results) == 0:
err_msg = (_("Could not find %(resource)s %(name)s") %
{'name': name_or_id, 'resource': resource})
raise nsx_exc.NsxPluginException(err_msg=err_msg)
elif len(matched_results) > 1:
err_msg = (_("Found multiple %(resource)s named %(name)s") %
{'name': name_or_id, 'resource': resource})
raise nsx_exc.NsxPluginException(err_msg=err_msg)
return matched_results[0].get('id')
def get_transport_zone_id_by_name_or_id(name_or_id):
"""Get a transport zone by it's display name or uuid
Return the transport zone data, or raise an exception if not found or
not unique
"""
return _get_resource_by_name_or_id(name_or_id, 'transport-zones')
def get_logical_router_id_by_name_or_id(name_or_id):
"""Get a logical router by it's display name or uuid
Return the logical router data, or raise an exception if not found or
not unique
"""
return _get_resource_by_name_or_id(name_or_id, 'logical-routers')
def get_bridge_cluster_id_by_name_or_id(name_or_id):
"""Get a bridge cluster by it's display name or uuid
Return the bridge cluster data, or raise an exception if not found or
not unique
"""
return _get_resource_by_name_or_id(name_or_id, 'bridge-clusters')
def create_port_mirror_session(source_ports, dest_ports, direction,
description, name, tags):
"""Create a PortMirror Session on the backend.
:param source_ports: List of UUIDs of the ports whose traffic is to be
mirrored.
:param dest_ports: List of UUIDs of the ports where the mirrored traffic is
to be sent.
:param direction: String representing the direction of traffic to be
mirrored. [INGRESS, EGRESS, BIDIRECTIONAL]
:param description: String representing the description of the session.
:param name: String representing the name of the session.
:param tags: nsx backend specific tags.
"""
resource = 'mirror-sessions'
body = {'direction': direction,
'tags': tags,
'display_name': name,
'description': description,
'mirror_sources': source_ports,
'mirror_destination': dest_ports}
return client.create_resource(resource, body)
def delete_port_mirror_session(mirror_session_id):
"""Delete a PortMirror session on the backend.
:param mirror_session_id: string representing the UUID of the port mirror
session to be deleted.
"""
resource = 'mirror-sessions/%s' % mirror_session_id
client.delete_resource(resource)
body['admin_state'] = nsx_constants.ADMIN_STATE_DOWN
if vlan_id:
body['vlan'] = vlan_id
return self.client.create(resource, body)
@utils.retry_upon_exception_nsxv3(exceptions.StaleRevision,
max_attempts=MAX_ATTEMPTS)
def delete_logical_switch(self, lswitch_id):
resource = 'logical-switches/%s?detach=true&cascade=true' % lswitch_id
self.client.delete(resource)
def get_logical_switch(self, logical_switch_id):
resource = "logical-switches/%s" % logical_switch_id
return self.client.get(resource)
@utils.retry_upon_exception_nsxv3(exceptions.StaleRevision,
max_attempts=MAX_ATTEMPTS)
def update_logical_switch(self, lswitch_id, name=None, admin_state=None,
tags=None):
resource = "logical-switches/%s" % lswitch_id
lswitch = self.get_logical_switch(lswitch_id)
if name is not None:
lswitch['display_name'] = name
if admin_state is not None:
if admin_state:
lswitch['admin_state'] = nsx_constants.ADMIN_STATE_UP
else:
lswitch['admin_state'] = nsx_constants.ADMIN_STATE_DOWN
if tags is not None:
lswitch['tags'] = tags
return self.client.update(resource, lswitch)
def add_nat_rule(self, logical_router_id, action, translated_network,
source_net=None, dest_net=None,
enabled=True, rule_priority=None):
resource = 'logical-routers/%s/nat/rules' % logical_router_id
body = {'action': action,
'enabled': enabled,
'translated_network': translated_network}
if source_net:
body['match_source_network'] = source_net
if dest_net:
body['match_destination_network'] = dest_net
if rule_priority:
body['rule_priority'] = rule_priority
return self.client.create(resource, body)
def add_static_route(self, logical_router_id, dest_cidr, nexthop):
resource = ('logical-routers/%s/routing/static-routes' %
logical_router_id)
body = {}
if dest_cidr:
body['network'] = dest_cidr
if nexthop:
body['next_hops'] = [{"ip_address": nexthop}]
return self.client.create(resource, body)
def delete_static_route(self, logical_router_id, static_route_id):
resource = 'logical-routers/%s/routing/static-routes/%s' % (
logical_router_id, static_route_id)
self.client.delete(resource)
def delete_static_route_by_values(self, logical_router_id,
dest_cidr=None, nexthop=None):
resource = ('logical-routers/%s/routing/static-routes' %
logical_router_id)
kwargs = {}
if dest_cidr:
kwargs['network'] = dest_cidr
if nexthop:
kwargs['next_hops'] = [{"ip_address": nexthop}]
return self.delete_resource_by_values(resource, **kwargs)
def delete_nat_rule(self, logical_router_id, nat_rule_id):
resource = 'logical-routers/%s/nat/rules/%s' % (logical_router_id,
nat_rule_id)
self.client.delete(resource)
def delete_nat_rule_by_values(self, logical_router_id, **kwargs):
resource = 'logical-routers/%s/nat/rules' % logical_router_id
return self.delete_resource_by_values(resource, **kwargs)
def update_logical_router_advertisement(self, logical_router_id, **kwargs):
resource = ('logical-routers/%s/routing/advertisement' %
logical_router_id)
return self.update_resource_with_retry(resource, kwargs)
def _build_qos_switching_profile_args(self, tags, name=None,
description=None):
body = {"resource_type": "QosSwitchingProfile",
"tags": tags}
return self._update_qos_switching_profile_args(
body, name=name, description=description)
def _update_qos_switching_profile_args(self, body, name=None,
description=None):
if name:
body["display_name"] = name
if description:
body["description"] = description
return body
def _enable_shaping_in_args(self, body, burst_size=None,
peak_bandwidth=None, average_bandwidth=None):
for shaper in body["shaper_configuration"]:
# Neutron currently supports only shaping of Egress traffic
if shaper["resource_type"] == "EgressRateShaper":
shaper["enabled"] = True
if burst_size:
shaper["burst_size_bytes"] = burst_size
if peak_bandwidth:
shaper["peak_bandwidth_mbps"] = peak_bandwidth
if average_bandwidth:
shaper["average_bandwidth_mbps"] = average_bandwidth
break
return body
def _disable_shaping_in_args(self, body):
for shaper in body["shaper_configuration"]:
# Neutron currently supports only shaping of Egress traffic
if shaper["resource_type"] == "EgressRateShaper":
shaper["enabled"] = False
shaper["burst_size_bytes"] = 0
shaper["peak_bandwidth_mbps"] = 0
shaper["average_bandwidth_mbps"] = 0
break
return body
def _update_dscp_in_args(self, body, qos_marking, dscp):
body["dscp"] = {}
body["dscp"]["mode"] = qos_marking.upper()
if dscp:
body["dscp"]["priority"] = dscp
return body
def create_qos_switching_profile(self, tags, name=None,
description=None):
resource = 'switching-profiles'
body = self._build_qos_switching_profile_args(tags, name,
description)
return self.client.create(resource, body)
def update_qos_switching_profile(self, profile_id, tags, name=None,
description=None):
resource = 'switching-profiles/%s' % profile_id
# get the current configuration
body = self.get_qos_switching_profile(profile_id)
# update the relevant fields
body = self._update_qos_switching_profile_args(body, name,
description)
return self.update_resource_with_retry(resource, body)
def update_qos_switching_profile_shaping(self, profile_id,
shaping_enabled=False,
burst_size=None,
peak_bandwidth=None,
average_bandwidth=None,
qos_marking=None, dscp=None):
resource = 'switching-profiles/%s' % profile_id
# get the current configuration
body = self.get_qos_switching_profile(profile_id)
# update the relevant fields
if shaping_enabled:
body = self._enable_shaping_in_args(
body, burst_size=burst_size,
peak_bandwidth=peak_bandwidth,
average_bandwidth=average_bandwidth)
else:
body = self._disable_shaping_in_args(body)
body = self._update_dscp_in_args(body, qos_marking, dscp)
return self.update_resource_with_retry(resource, body)
def get_qos_switching_profile(self, profile_id):
resource = 'switching-profiles/%s' % profile_id
return self.client.get(resource)
def delete_qos_switching_profile(self, profile_id):
resource = 'switching-profiles/%s' % profile_id
self.client.delete(resource)
def create_bridge_endpoint(self, device_name, seg_id, tags):
"""Create a bridge endpoint on the backend.
Create a bridge endpoint resource on a bridge cluster for the L2
gateway network connection.
:param device_name: device_name actually refers to the bridge cluster's
UUID.
:param seg_id: integer representing the VLAN segmentation ID.
:param tags: nsx backend specific tags.
"""
resource = 'bridge-endpoints'
body = {'bridge_cluster_id': device_name,
'tags': tags,
'vlan': seg_id}
return self.client.create(resource, body)
def delete_bridge_endpoint(self, bridge_endpoint_id):
"""Delete a bridge endpoint on the backend.
:param bridge_endpoint_id: string representing the UUID of the bridge
endpoint to be deleted.
"""
resource = 'bridge-endpoints/%s' % bridge_endpoint_id
self.client.delete(resource)
def _get_resource_by_name_or_id(self, name_or_id, resource):
all_results = self.client.get(resource)['results']
matched_results = []
for rs in all_results:
if rs.get('id') == name_or_id:
# Matched by id - must be unique
return name_or_id
if rs.get('display_name') == name_or_id:
# Matched by name - add to the list to verify it is unique
matched_results.append(rs)
if len(matched_results) == 0:
err_msg = (_("Could not find %(resource)s %(name)s") %
{'name': name_or_id, 'resource': resource})
# XXX improve exception handling...
raise exceptions.ManagerError(details=err_msg)
elif len(matched_results) > 1:
err_msg = (_("Found multiple %(resource)s named %(name)s") %
{'name': name_or_id, 'resource': resource})
# XXX improve exception handling...
raise exceptions.ManagerError(details=err_msg)
return matched_results[0].get('id')
def get_transport_zone_id_by_name_or_id(self, name_or_id):
"""Get a transport zone by it's display name or uuid
Return the transport zone data, or raise an exception if not found or
not unique
"""
return self._get_resource_by_name_or_id(name_or_id,
'transport-zones')
def get_logical_router_id_by_name_or_id(self, name_or_id):
"""Get a logical router by it's display name or uuid
Return the logical router data, or raise an exception if not found or
not unique
"""
return self._get_resource_by_name_or_id(name_or_id,
'logical-routers')
def get_bridge_cluster_id_by_name_or_id(self, name_or_id):
"""Get a bridge cluster by it's display name or uuid
Return the bridge cluster data, or raise an exception if not found or
not unique
"""
return self._get_resource_by_name_or_id(name_or_id,
'bridge-clusters')
def create_port_mirror_session(self, source_ports, dest_ports, direction,
description, name, tags):
"""Create a PortMirror Session on the backend.
:param source_ports: List of UUIDs of the ports whose traffic is to be
mirrored.
:param dest_ports: List of UUIDs of the ports where the mirrored
traffic is to be sent.
:param direction: String representing the direction of traffic to be
mirrored. [INGRESS, EGRESS, BIDIRECTIONAL]
:param description: String representing the description of the session.
:param name: String representing the name of the session.
:param tags: nsx backend specific tags.
"""
resource = 'mirror-sessions'
body = {'direction': direction,
'tags': tags,
'display_name': name,
'description': description,
'mirror_sources': source_ports,
'mirror_destination': dest_ports}
return self.client.create(resource, body)
def delete_port_mirror_session(self, mirror_session_id):
"""Delete a PortMirror session on the backend.
:param mirror_session_id: string representing the UUID of the port
mirror session to be deleted.
"""
resource = 'mirror-sessions/%s' % mirror_session_id
self.client.delete(resource)


@@ -20,12 +20,12 @@ from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from vmware_nsx._i18n import _, _LW
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.nsxlib.v3 import exceptions
LOG = log.getLogger(__name__)
ERRORS = {requests.codes.NOT_FOUND: nsx_exc.ResourceNotFound,
requests.codes.PRECONDITION_FAILED: nsx_exc.StaleRevision}
ERRORS = {requests.codes.NOT_FOUND: exceptions.ResourceNotFound,
requests.codes.PRECONDITION_FAILED: exceptions.StaleRevision}
class RESTClient(object):
@@ -63,8 +63,8 @@ class RESTClient(object):
def update(self, uuid, body=None, headers=None):
return self.url_put(uuid, body, headers=headers)
def create(self, body=None, headers=None):
return self.url_post('', body, headers=headers)
def create(self, resource='', body=None, headers=None):
return self.url_post(resource, body, headers=headers)
def url_list(self, url, headers=None):
return self.url_get(url, headers=headers)
@@ -93,7 +93,7 @@ class RESTClient(object):
'body': result_msg})
manager_error = ERRORS.get(
result.status_code, nsx_exc.ManagerError)
result.status_code, exceptions.ManagerError)
if isinstance(result_msg, dict):
result_msg = result_msg.get('error_message', result_msg)
raise manager_error(


@@ -35,8 +35,9 @@ from oslo_service import loopingcall
from requests import adapters
from requests import exceptions as requests_exceptions
from vmware_nsx._i18n import _, _LI, _LW
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.nsxlib.v3 import client as nsx_client
from vmware_nsx.nsxlib.v3 import exceptions
LOG = log.getLogger(__name__)
@@ -117,7 +118,7 @@ class NSXRequestsHTTPProvider(AbstractHTTPProvider):
msg = _("No transport zones found "
"for '%s'") % endpoint.provider.url
LOG.warning(msg)
raise nsx_exc.ResourceNotFound(
raise exceptions.ResourceNotFound(
manager=endpoint.provider.url, operation=msg)
def new_connection(self, cluster_api, provider):
@@ -392,7 +393,7 @@ class ClusteredAPI(object):
[str(ep) for ep in self._endpoints.values()])
# all endpoints are DOWN and will have their next
# state updated as per _endpoint_keepalive()
raise nsx_exc.ServiceClusterUnavailable(
raise exceptions.ServiceClusterUnavailable(
cluster_id=self.cluster_id)
if endpoint.pool.free() == 0:


@@ -19,10 +19,10 @@ NSX-V3 Distributed Firewall
"""
from oslo_log import log
from vmware_nsx._i18n import _, _LW
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx._i18n import _LW
from vmware_nsx.common import utils
from vmware_nsx.nsxlib.v3 import client as nsxclient
from vmware_nsx.nsxlib.v3 import exceptions
LOG = log.getLogger(__name__)
@@ -72,223 +72,198 @@ IPV6 = 'IPV6'
IPV4_IPV6 = 'IPV4_IPV6'
class NSGroupMemberNotFound(nsx_exc.NsxPluginException):
message = _("Could not find NSGroup %(nsgroup_id)s member %(member_id)s "
"for removal.")
class DfwApi(object):
def get_nsservice(self, resource_type, **properties):
service = {'resource_type': resource_type}
service.update(properties)
return {'service': service}
class NSGroupIsFull(nsx_exc.NsxPluginException):
message = _("NSGroup %(nsgroup_id)s contains has reached its maximum "
"capacity, unable to add additional members.")
def get_nsgroup_port_tag_expression(self, scope, tag):
return {'resource_type': NSGROUP_TAG_EXPRESSION,
'target_type': LOGICAL_PORT,
'scope': scope,
'tag': tag}
def create_nsgroup(self, display_name, description, tags,
membership_criteria=None):
body = {'display_name': display_name,
'description': description,
'tags': tags,
'members': []}
if membership_criteria:
body.update({'membership_criteria': [membership_criteria]})
return self.client.create('ns-groups', body)
def get_nsservice(resource_type, **properties):
service = {'resource_type': resource_type}
service.update(properties)
return {'service': service}
def list_nsgroups(self):
return self.client.get(
'ns-groups?populate_references=false').get('results', [])
@utils.retry_upon_exception_nsxv3(exceptions.StaleRevision)
def update_nsgroup(self, nsgroup_id, display_name=None, description=None,
membership_criteria=None, members=None):
nsgroup = self.read_nsgroup(nsgroup_id)
if display_name is not None:
nsgroup['display_name'] = display_name
if description is not None:
nsgroup['description'] = description
if members is not None:
nsgroup['members'] = members
if membership_criteria is not None:
nsgroup['membership_criteria'] = [membership_criteria]
return self.client.update(
'ns-groups/%s' % nsgroup_id, nsgroup)
def get_nsgroup_port_tag_expression(scope, tag):
return {'resource_type': NSGROUP_TAG_EXPRESSION,
'target_type': LOGICAL_PORT,
'scope': scope,
'tag': tag}
def get_nsgroup_member_expression(self, target_type, target_id):
return {'resource_type': NSGROUP_SIMPLE_EXPRESSION,
'target_property': 'id',
'target_type': target_type,
'op': EQUALS,
'value': target_id}
@utils.retry_upon_exception_nsxv3(exceptions.ManagerError)
def _update_nsgroup_with_members(self, nsgroup_id, members, action):
members_update = 'ns-groups/%s?action=%s' % (nsgroup_id, action)
return self.client.create(members_update, members)
def create_nsgroup(display_name, description, tags, membership_criteria=None):
body = {'display_name': display_name,
'description': description,
'tags': tags,
'members': []}
if membership_criteria:
body.update({'membership_criteria': [membership_criteria]})
return nsxclient.create_resource('ns-groups', body)
def add_nsgroup_members(self, nsgroup_id, target_type, target_ids):
members = []
for target_id in target_ids:
member_expr = self.get_nsgroup_member_expression(
target_type, target_id)
members.append(member_expr)
members = {'members': members}
try:
return self._update_nsgroup_with_members(
nsgroup_id, members, ADD_MEMBERS)
except (exceptions.StaleRevision, exceptions.ResourceNotFound):
raise
except exceptions.ManagerError:
# REVISIT(roeyc): A ManagerError might have been raised for a
# different reason, e.g - NSGroup does not exists.
LOG.warning(_LW("Failed to add %(target_type)s resources "
"(%(target_ids))s to NSGroup %(nsgroup_id)s"),
{'target_type': target_type,
'target_ids': target_ids,
'nsgroup_id': nsgroup_id})
raise exceptions.NSGroupIsFull(nsgroup_id=nsgroup_id)
def list_nsgroups():
return nsxclient.get_resource(
'ns-groups?populate_references=false').get('results', [])
def remove_nsgroup_member(self, nsgroup_id, target_type,
target_id, verify=False):
member_expr = self.get_nsgroup_member_expression(
target_type, target_id)
members = {'members': [member_expr]}
try:
return self._update_nsgroup_with_members(
nsgroup_id, members, REMOVE_MEMBERS)
except exceptions.ManagerError:
if verify:
raise exceptions.NSGroupMemberNotFound(member_id=target_id,
nsgroup_id=nsgroup_id)
def read_nsgroup(self, nsgroup_id):
return self.client.get(
'ns-groups/%s?populate_references=true' % nsgroup_id)
@utils.retry_upon_exception_nsxv3(nsx_exc.StaleRevision)
def update_nsgroup(nsgroup_id, display_name=None, description=None,
membership_criteria=None, members=None):
nsgroup = read_nsgroup(nsgroup_id)
if display_name is not None:
nsgroup['display_name'] = display_name
if description is not None:
nsgroup['description'] = description
if members is not None:
nsgroup['members'] = members
if membership_criteria is not None:
nsgroup['membership_criteria'] = [membership_criteria]
return nsxclient.update_resource('ns-groups/%s' % nsgroup_id, nsgroup)
def delete_nsgroup(self, nsgroup_id):
try:
return self.client.delete(
'ns-groups/%s?force=true' % nsgroup_id)
# FIXME(roeyc): Should only except NotFound error.
except Exception:
LOG.debug("NSGroup %s does not exists for delete request.",
nsgroup_id)
def _build_section(self, display_name, description, applied_tos, tags):
return {'display_name': display_name,
'description': description,
'stateful': True,
'section_type': LAYER3,
'applied_tos': [self.get_nsgroup_reference(t_id)
for t_id in applied_tos],
'tags': tags}
def get_nsgroup_member_expression(target_type, target_id):
return {'resource_type': NSGROUP_SIMPLE_EXPRESSION,
'target_property': 'id',
'target_type': target_type,
'op': EQUALS,
'value': target_id}
def create_empty_section(self, display_name, description, applied_tos,
tags, operation=INSERT_BOTTOM,
other_section=None):
resource = 'firewall/sections?operation=%s' % operation
body = self._build_section(display_name, description,
applied_tos, tags)
if other_section:
resource += '&id=%s' % other_section
return self.client.create(resource, body)
@utils.retry_upon_exception_nsxv3(exceptions.StaleRevision)
def update_section(self, section_id, display_name=None, description=None,
applied_tos=None, rules=None):
resource = 'firewall/sections/%s' % section_id
section = self.read_section(section_id)
@utils.retry_upon_exception_nsxv3(nsx_exc.ManagerError)
def _update_nsgroup_with_members(nsgroup_id, members, action):
members_update = 'ns-groups/%s?action=%s' % (nsgroup_id, action)
return nsxclient.create_resource(members_update, members)
if rules is not None:
resource += '?action=update_with_rules'
section.update({'rules': rules})
if display_name is not None:
section['display_name'] = display_name
if description is not None:
section['description'] = description
if applied_tos is not None:
section['applied_tos'] = [self.get_nsgroup_reference(nsg_id)
for nsg_id in applied_tos]
if rules is not None:
return nsxclient.create_resource(resource, section)
elif any(p is not None for p in (display_name, description,
applied_tos)):
return self.client.update(resource, section)
def read_section(self, section_id):
resource = 'firewall/sections/%s' % section_id
return self.client.get(resource)
def add_nsgroup_members(nsgroup_id, target_type, target_ids):
members = []
for target_id in target_ids:
member_expr = get_nsgroup_member_expression(target_type, target_id)
members.append(member_expr)
members = {'members': members}
try:
return _update_nsgroup_with_members(nsgroup_id, members, ADD_MEMBERS)
except (nsx_exc.StaleRevision, nsx_exc.ResourceNotFound):
raise
except nsx_exc.ManagerError:
# REVISIT(roeyc): A ManagerError might have been raised for a
# different reason, e.g - NSGroup does not exists.
LOG.warning(_LW("Failed to add %(target_type)s resources "
"(%(target_ids))s to NSGroup %(nsgroup_id)s"),
{'target_type': target_type,
'target_ids': target_ids,
'nsgroup_id': nsgroup_id})
raise NSGroupIsFull(nsgroup_id=nsgroup_id)
def list_sections(self):
resource = 'firewall/sections'
return self.client.get(resource).get('results', [])
def delete_section(self, section_id):
resource = 'firewall/sections/%s?cascade=true' % section_id
return self.client.delete(resource)
def remove_nsgroup_member(nsgroup_id, target_type, target_id, verify=False):
member_expr = get_nsgroup_member_expression(target_type, target_id)
members = {'members': [member_expr]}
try:
return _update_nsgroup_with_members(
nsgroup_id, members, REMOVE_MEMBERS)
except nsx_exc.ManagerError:
if verify:
raise NSGroupMemberNotFound(member_id=target_id,
nsgroup_id=nsgroup_id)
def get_nsgroup_reference(self, nsgroup_id):
return {'target_id': nsgroup_id,
'target_type': NSGROUP}
def get_ip_cidr_reference(self, ip_cidr_block, ip_protocol):
target_type = IPV4ADDRESS if ip_protocol == IPV4 else IPV6ADDRESS
return {'target_id': ip_cidr_block,
'target_type': target_type}
def read_nsgroup(nsgroup_id):
return nsxclient.get_resource(
'ns-groups/%s?populate_references=true' % nsgroup_id)
def get_firewall_rule_dict(self, display_name, source=None,
destination=None,
direction=IN_OUT, ip_protocol=IPV4_IPV6,
service=None, action=ALLOW, logged=False):
return {'display_name': display_name,
'sources': [source] if source else [],
'destinations': [destination] if destination else [],
'direction': direction,
'ip_protocol': ip_protocol,
'services': [service] if service else [],
'action': action,
'logged': logged}
def add_rule_in_section(self, rule, section_id):
resource = 'firewall/sections/%s/rules' % section_id
params = '?operation=insert_bottom'
return self.client.create(resource + params, rule)
def delete_nsgroup(nsgroup_id):
try:
return nsxclient.delete_resource('ns-groups/%s?force=true'
% nsgroup_id)
#FIXME(roeyc): Should only except NotFound error.
except Exception:
LOG.debug("NSGroup %s does not exists for delete request.",
nsgroup_id)
def add_rules_in_section(self, rules, section_id):
resource = 'firewall/sections/%s/rules' % section_id
params = '?action=create_multiple&operation=insert_bottom'
return self.client.create(resource + params, {'rules': rules})
def delete_rule(self, section_id, rule_id):
resource = 'firewall/sections/%s/rules/%s' % (section_id, rule_id)
return self.client.delete(resource)
def _build_section(display_name, description, applied_tos, tags):
return {'display_name': display_name,
'description': description,
'stateful': True,
'section_type': LAYER3,
'applied_tos': [get_nsgroup_reference(t_id)
for t_id in applied_tos],
'tags': tags}
def create_empty_section(display_name, description, applied_tos, tags,
operation=INSERT_BOTTOM, other_section=None):
resource = 'firewall/sections?operation=%s' % operation
body = _build_section(display_name, description, applied_tos, tags)
if other_section:
resource += '&id=%s' % other_section
return nsxclient.create_resource(resource, body)
@utils.retry_upon_exception_nsxv3(nsx_exc.StaleRevision)
def update_section(section_id, display_name=None, description=None,
applied_tos=None, rules=None):
resource = 'firewall/sections/%s' % section_id
section = read_section(section_id)
if rules is not None:
resource += '?action=update_with_rules'
section.update({'rules': rules})
if display_name is not None:
section['display_name'] = display_name
if description is not None:
section['description'] = description
if applied_tos is not None:
section['applied_tos'] = [get_nsgroup_reference(nsg_id)
for nsg_id in applied_tos]
if rules is not None:
return nsxclient.create_resource(resource, section)
elif any(p is not None for p in (display_name, description, applied_tos)):
return nsxclient.update_resource(resource, section)
def read_section(section_id):
resource = 'firewall/sections/%s' % section_id
return nsxclient.get_resource(resource)
def list_sections():
resource = 'firewall/sections'
return nsxclient.get_resource(resource).get('results', [])
def delete_section(section_id):
resource = 'firewall/sections/%s?cascade=true' % section_id
try:
return nsxclient.delete_resource(resource)
#FIXME(roeyc): Should only except NotFound error.
except Exception:
LOG.debug("Firewall section %s does not exists for delete request.",
section_id)
def get_nsgroup_reference(nsgroup_id):
return {'target_id': nsgroup_id,
'target_type': NSGROUP}
def get_ip_cidr_reference(ip_cidr_block, ip_protocol):
target_type = IPV4ADDRESS if ip_protocol == IPV4 else IPV6ADDRESS
return {'target_id': ip_cidr_block,
'target_type': target_type}
def get_firewall_rule_dict(display_name, source=None, destination=None,
direction=IN_OUT, ip_protocol=IPV4_IPV6,
service=None, action=ALLOW, logged=False):
return {'display_name': display_name,
'sources': [source] if source else [],
'destinations': [destination] if destination else [],
'direction': direction,
'ip_protocol': ip_protocol,
'services': [service] if service else [],
'action': action,
'logged': logged}
def add_rule_in_section(rule, section_id):
resource = 'firewall/sections/%s/rules' % section_id
params = '?operation=insert_bottom'
return nsxclient.create_resource(resource + params, rule)
def add_rules_in_section(rules, section_id):
resource = 'firewall/sections/%s/rules' % section_id
params = '?action=create_multiple&operation=insert_bottom'
return nsxclient.create_resource(resource + params, {'rules': rules})
def delete_rule(section_id, rule_id):
resource = 'firewall/sections/%s/rules/%s' % (section_id, rule_id)
return nsxclient.delete_resource(resource)
def get_section_rules(section_id):
resource = 'firewall/sections/%s/rules' % section_id
return nsxclient.get_resource(resource)
def get_section_rules(self, section_id):
resource = 'firewall/sections/%s/rules' % section_id
return self.client.get(resource)


@@ -0,0 +1,97 @@
# Copyright 2016 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_utils import excutils
import six
from vmware_nsx._i18n import _
class NsxLibException(Exception):
"""Base NsxLib Exception.
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = _("An unknown exception occurred.")
def __init__(self, **kwargs):
try:
super(NsxLibException, self).__init__(self.message % kwargs)
self.msg = self.message % kwargs
except Exception:
with excutils.save_and_reraise_exception() as ctxt:
if not self.use_fatal_exceptions():
ctxt.reraise = False
# at least get the core message out if something happened
super(NsxLibException, self).__init__(self.message)
if six.PY2:
def __unicode__(self):
return unicode(self.msg)
def __str__(self):
return self.msg
def use_fatal_exceptions(self):
return False
class ManagerError(NsxLibException):
message = _("Unexpected error from backend manager (%(manager)s) "
"for %(operation)s %(details)s")
def __init__(self, **kwargs):
kwargs['details'] = (': %s' % kwargs['details']
if 'details' in kwargs
else '')
super(ManagerError, self).__init__(**kwargs)
self.msg = self.message % kwargs
class ResourceNotFound(ManagerError):
message = _("Resource could not be found on backend (%(manager)s) for "
"%(operation)s")
class StaleRevision(ManagerError):
pass
class ServiceClusterUnavailable(ManagerError):
message = _("Service cluster: '%(cluster_id)s' is unavailable. Please, "
"check NSX setup and/or configuration")
class NSGroupMemberNotFound(ManagerError):
message = _("Could not find NSGroup %(nsgroup_id)s member %(member_id)s "
"for removal.")
class NSGroupIsFull(ManagerError):
message = _("NSGroup %(nsgroup_id)s contains has reached its maximum "
"capacity, unable to add additional members.")
class NumberOfNsgroupCriteriaTagsReached(ManagerError):
message = _("Port can be associated with at most %(max_num)s "
"security-groups.")
class SecurityGroupMaximumCapacityReached(ManagerError):
message = _("Security Group %(sg_id)s has reached its maximum capacity, "
"no more ports can be associated with this security-group.")


@@ -0,0 +1,166 @@
# Copyright 2015 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_config import cfg
from oslo_log import log
from vmware_nsx._i18n import _, _LW
from vmware_nsx.common import utils
from vmware_nsx.nsxlib import v3
from vmware_nsx.nsxlib.v3 import dfw_api as firewall
from vmware_nsx.nsxlib.v3 import exceptions
LOG = log.getLogger(__name__)
class NSGroupManager(object):
"""
This class assists with NSX integration for Neutron security-groups,
Each Neutron security-group is associated with NSX NSGroup object.
Some specific security policies are the same across all security-groups,
i.e - Default drop rule, DHCP. In order to bind these rules to all
NSGroups (security-groups), we create a nested NSGroup (which its members
are also of type NSGroups) to group the other NSGroups and associate it
with these rules.
In practice, one NSGroup (nested) can't contain all the other NSGroups, as
it has strict size limit. To overcome the limited space challange, we
create several nested groups instead of just one, and we evenly distribute
NSGroups (security-groups) between them.
By using an hashing function on the NSGroup uuid we determine in which
group it should be added, and when deleting an NSGroup (security-group) we
use the same procedure to find which nested group it was added.
"""
NESTED_GROUP_NAME = 'OS Nested Group'
NESTED_GROUP_DESCRIPTION = ('OpenStack NSGroup. Do not delete.')
def __init__(self, size):
# XXX intergrate this in a better way..
self.nsx = v3.NsxLib(
username=cfg.CONF.nsx_v3.nsx_api_user,
password=cfg.CONF.nsx_v3.nsx_api_password,
retries=cfg.CONF.nsx_v3.http_retries,
insecure=cfg.CONF.nsx_v3.insecure,
ca_file=cfg.CONF.nsx_v3.ca_file,
concurrent_connections=cfg.CONF.nsx_v3.concurrent_connections,
http_timeout=cfg.CONF.nsx_v3.http_timeout,
http_read_timeout=cfg.CONF.nsx_v3.http_read_timeout,
conn_idle_timeout=cfg.CONF.nsx_v3.conn_idle_timeout,
http_provider=None,
max_attempts=cfg.CONF.nsx_v3.retries)
self._nested_groups = self._init_nested_groups(size)
self._size = len(self._nested_groups)
@property
def size(self):
return self._size
@property
def nested_groups(self):
return self._nested_groups
def _init_nested_groups(self, requested_size):
# Construct the groups dict -
# {0: <groups-1>,.., n-1: <groups-n>}
size = requested_size
nested_groups = {
self._get_nested_group_index_from_name(nsgroup): nsgroup['id']
for nsgroup in self.nsx.list_nsgroups()
if utils.is_internal_resource(nsgroup)}
if nested_groups:
size = max(requested_size, max(nested_groups) + 1)
if size > requested_size:
LOG.warning(_LW("Lowering the value of "
"nsx_v3:number_of_nested_groups isn't "
"supported, '%s' nested-groups will be used."),
size)
absent_groups = set(range(size)) - set(nested_groups.keys())
if absent_groups:
LOG.warning(
_LW("Found %(num_present)s Nested Groups, "
"creating %(num_absent)s more."),
{'num_present': len(nested_groups),
'num_absent': len(absent_groups)})
for i in absent_groups:
cont = self._create_nested_group(i)
nested_groups[i] = cont['id']
return nested_groups
def _get_nested_group_index_from_name(self, nested_group):
# The name format is "Nested Group <index+1>"
return int(nested_group['display_name'].split()[-1]) - 1
def _create_nested_group(self, index):
name_prefix = NSGroupManager.NESTED_GROUP_NAME
name = '%s %s' % (name_prefix, index + 1)
description = NSGroupManager.NESTED_GROUP_DESCRIPTION
tags = utils.build_v3_api_version_tag()
return self.nsx.create_nsgroup(name, description, tags)
def _hash_uuid(self, internal_id):
return hash(uuid.UUID(internal_id))
def _suggest_nested_group(self, internal_id):
# Suggests a nested group to use, can be iterated to find alternative
# group in case that previous suggestions did not help.
index = self._hash_uuid(internal_id) % self.size
yield self.nested_groups[index]
for i in range(1, self.size):
index = (index + 1) % self.size
yield self.nested_groups[index]
def add_nsgroup(self, nsgroup_id):
for group in self._suggest_nested_group(nsgroup_id):
try:
LOG.debug("Adding NSGroup %s to nested group %s",
nsgroup_id, group)
self.nsx.add_nsgroup_members(group,
firewall.NSGROUP,
[nsgroup_id])
break
except exceptions.NSGroupIsFull:
LOG.debug("Nested group %(group_id)s is full, trying the "
"next group..", {'group_id': group})
else:
raise exceptions.ManagerError(
details=_("Reached the maximum supported amount of "
"security groups."))
def remove_nsgroup(self, nsgroup_id):
for group in self._suggest_nested_group(nsgroup_id):
try:
self.nsx.remove_nsgroup_member(
group, firewall.NSGROUP, nsgroup_id, verify=True)
break
except exceptions.NSGroupMemberNotFound:
LOG.warning(_LW("NSGroup %(nsgroup)s was expected to be found "
"in group %(group_id)s, but wasn't. "
"Looking in the next group.."),
{'nsgroup': nsgroup_id, 'group_id': group})
continue
else:
LOG.warning(_LW("NSGroup %s was marked for removal, but its "
"reference is missing."), nsgroup_id)


@@ -20,10 +20,10 @@ import six
from oslo_config import cfg
from vmware_nsx._i18n import _
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.common import nsx_constants
from vmware_nsx.common import utils
from vmware_nsx.nsxlib.v3 import client
from vmware_nsx.nsxlib.v3 import exceptions
SwitchingProfileTypeId = collections.namedtuple(
@@ -281,13 +281,13 @@ class LogicalPort(AbstractRESTResource):
return self._client.create(body=body)
@utils.retry_upon_exception_nsxv3(
nsx_exc.StaleRevision,
exceptions.StaleRevision,
max_attempts=cfg.CONF.nsx_v3.retries)
def delete(self, lport_id):
return self._client.url_delete('%s?detach=true' % lport_id)
@utils.retry_upon_exception_nsxv3(
nsx_exc.StaleRevision,
exceptions.StaleRevision,
max_attempts=cfg.CONF.nsx_v3.retries)
def update(self, lport_id, vif_uuid,
name=None, admin_state=None,
@@ -338,7 +338,7 @@ class LogicalRouter(AbstractRESTResource):
return self._client.url_delete(lrouter_id)
@utils.retry_upon_exception_nsxv3(
nsx_exc.StaleRevision,
exceptions.StaleRevision,
max_attempts=cfg.CONF.nsx_v3.retries)
def update(self, lrouter_id, *args, **kwargs):
lrouter = self.get(lrouter_id)
@@ -382,10 +382,10 @@ class LogicalRouterPort(AbstractRESTResource):
if edge_cluster_member_index:
body['edge_cluster_member_index'] = edge_cluster_member_index
return self._client.create(body)
return self._client.create(body=body)
@utils.retry_upon_exception_nsxv3(
nsx_exc.StaleRevision,
exceptions.StaleRevision,
max_attempts=cfg.CONF.nsx_v3.retries)
def update(self, logical_port_id, **kwargs):
logical_router_port = self.get(logical_port_id)
@@ -405,15 +405,15 @@ class LogicalRouterPort(AbstractRESTResource):
router_ports = self._client.url_get(resource)
result_count = int(router_ports.get('result_count', "0"))
if result_count >= 2:
raise nsx_exc.NsxPluginException(
err_msg=_("Can't support more than one logical router ports "
raise exceptions.ManagerError(
details=_("Can't support more than one logical router ports "
"on same logical switch %s ") % logical_switch_id)
elif result_count == 1:
return router_ports['results'][0]
else:
err_msg = (_("Logical router link port not found on logical "
"switch %s") % logical_switch_id)
raise nsx_exc.ResourceNotFound(
raise exceptions.ResourceNotFound(
manager=client._get_nsx_managers_from_conf(),
operation=err_msg)
@@ -435,7 +435,7 @@ class LogicalRouterPort(AbstractRESTResource):
for port in logical_router_ports:
if port['resource_type'] == nsx_constants.LROUTERPORT_LINKONTIER1:
return port
raise nsx_exc.ResourceNotFound(
raise exceptions.ResourceNotFound(
manager=client._get_nsx_managers_from_conf(),
operation="get router link port")
@@ -501,7 +501,7 @@ class LogicalDhcpServer(AbstractRESTResource):
return self._client.create(body=body)
@utils.retry_upon_exception_nsxv3(
nsx_exc.StaleRevision,
exceptions.StaleRevision,
max_attempts=cfg.CONF.nsx_v3.retries)
def update(self, uuid, dhcp_profile_id=None, server_ip=None, name=None,
dns_servers=None, domain_name=None, gateway_ip=None,
@@ -529,7 +529,7 @@ class LogicalDhcpServer(AbstractRESTResource):
return self._client.url_get(url)
@utils.retry_upon_exception_nsxv3(
nsx_exc.StaleRevision,
exceptions.StaleRevision,
max_attempts=cfg.CONF.nsx_v3.retries)
def update_binding(self, server_uuid, binding_uuid, **kwargs):
body = self.get_binding(server_uuid, binding_uuid)


@@ -22,10 +22,9 @@ from neutron_lib import exceptions as n_exc
from oslo_log import log
from vmware_nsx._i18n import _, _LW
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.common import nsx_constants
from vmware_nsx.common import utils
from vmware_nsx.nsxlib import v3 as nsxlib
from vmware_nsx.nsxlib.v3 import exceptions
LOG = log.getLogger(__name__)
@@ -41,15 +40,16 @@ GW_NAT_PRI = 1000
class RouterLib(object):
def __init__(self, router_client, router_port_client):
def __init__(self, router_client, router_port_client, nsxlib):
self._router_client = router_client
self._router_port_client = router_port_client
self.nsxlib = nsxlib
def validate_tier0(self, tier0_groups_dict, tier0_uuid):
err_msg = None
try:
lrouter = self._router_client.get(tier0_uuid)
except nsx_exc.ResourceNotFound:
except exceptions.ResourceNotFound:
err_msg = (_("Tier0 router %s not found at the backend. Either a "
"valid UUID must be specified or a default tier0 "
"router UUID must be configured in nsx.ini") %
@@ -60,7 +60,7 @@ class RouterLib(object):
err_msg = _("Failed to get edge cluster uuid from tier0 "
"router %s at the backend") % lrouter
else:
edge_cluster = nsxlib.get_edge_cluster(edge_cluster_uuid)
edge_cluster = self.nsxlib.get_edge_cluster(edge_cluster_uuid)
member_index_list = [member['member_index']
for member in edge_cluster['members']]
if len(member_index_list) < MIN_EDGE_NODE_NUM:
@@ -101,7 +101,7 @@ class RouterLib(object):
try:
tier1_link_port = (
self._router_port_client.get_tier1_link_port(tier1_uuid))
except nsx_exc.ResourceNotFound:
except exceptions.ResourceNotFound:
LOG.warning(_LW("Logical router link port for tier1 router: %s "
"not found at the backend"), tier1_uuid)
return
@@ -113,20 +113,20 @@ class RouterLib(object):
def update_advertisement(self, logical_router_id, advertise_route_nat,
advertise_route_connected, enabled=True):
return nsxlib.update_logical_router_advertisement(
return self.nsxlib.update_logical_router_advertisement(
logical_router_id,
advertise_nat_routes=advertise_route_nat,
advertise_nsx_connected_routes=advertise_route_connected,
enabled=enabled)
def delete_gw_snat_rule(self, logical_router_id, gw_ip):
return nsxlib.delete_nat_rule_by_values(logical_router_id,
translated_network=gw_ip)
return self.nsxlib.delete_nat_rule_by_values(logical_router_id,
translated_network=gw_ip)
def add_gw_snat_rule(self, logical_router_id, gw_ip):
return nsxlib.add_nat_rule(logical_router_id, action="SNAT",
translated_network=gw_ip,
rule_priority=GW_NAT_PRI)
return self.nsxlib.add_nat_rule(logical_router_id, action="SNAT",
translated_network=gw_ip,
rule_priority=GW_NAT_PRI)
def update_router_edge_cluster(self, nsx_router_id, edge_cluster_uuid):
return self._router_client.update(nsx_router_id,
@ -140,7 +140,7 @@ class RouterLib(object):
address_groups):
try:
port = self._router_port_client.get_by_lswitch_id(ls_id)
except nsx_exc.ResourceNotFound:
except exceptions.ResourceNotFound:
return self._router_port_client.create(
logical_router_id,
display_name,
@ -153,30 +153,31 @@ class RouterLib(object):
port['id'], subnets=address_groups)
def add_fip_nat_rules(self, logical_router_id, ext_ip, int_ip):
nsxlib.add_nat_rule(logical_router_id, action="SNAT",
translated_network=ext_ip,
source_net=int_ip,
rule_priority=FIP_NAT_PRI)
nsxlib.add_nat_rule(logical_router_id, action="DNAT",
translated_network=int_ip,
dest_net=ext_ip,
rule_priority=FIP_NAT_PRI)
self.nsxlib.add_nat_rule(logical_router_id, action="SNAT",
translated_network=ext_ip,
source_net=int_ip,
rule_priority=FIP_NAT_PRI)
self.nsxlib.add_nat_rule(logical_router_id, action="DNAT",
translated_network=int_ip,
dest_net=ext_ip,
rule_priority=FIP_NAT_PRI)
def delete_fip_nat_rules(self, logical_router_id, ext_ip, int_ip):
nsxlib.delete_nat_rule_by_values(logical_router_id,
action="SNAT",
translated_network=ext_ip,
match_source_network=int_ip)
nsxlib.delete_nat_rule_by_values(logical_router_id,
action="DNAT",
translated_network=int_ip,
match_destination_network=ext_ip)
self.nsxlib.delete_nat_rule_by_values(logical_router_id,
action="SNAT",
translated_network=ext_ip,
match_source_network=int_ip)
self.nsxlib.delete_nat_rule_by_values(logical_router_id,
action="DNAT",
translated_network=int_ip,
match_destination_network=ext_ip)
def add_static_routes(self, nsx_router_id, route):
return nsxlib.add_static_route(nsx_router_id, route['destination'],
route['nexthop'])
return self.nsxlib.add_static_route(nsx_router_id,
route['destination'],
route['nexthop'])
def delete_static_routes(self, nsx_router_id, route):
return nsxlib.delete_static_route_by_values(
return self.nsxlib.delete_static_route_by_values(
nsx_router_id, dest_cidr=route['destination'],
nexthop=route['nexthop'])
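
With this change RouterLib stops calling module-level nsxlib functions and instead calls the NsxLib instance injected through its constructor. A minimal construction sketch, based on the RouterLib signature and the NsxLib keyword arguments shown elsewhere in this patch; the LogicalRouter resource class and the literal option values are assumptions, not taken from this excerpt:

# Illustration only: option values are placeholders, not recommended settings.
from vmware_nsx.nsxlib import v3 as nsxlib
from vmware_nsx.nsxlib.v3 import resources as nsx_resources
from vmware_nsx.nsxlib.v3 import router

nsxlib_instance = nsxlib.NsxLib(
    username='admin', password='secret',
    retries=3, insecure=True, ca_file=None,
    concurrent_connections=10, http_timeout=10,
    http_read_timeout=180, conn_idle_timeout=10,
    http_provider=None, max_attempts=3)

# The plugin builds its REST resource clients from nsxlib_instance.client.
router_client = nsx_resources.LogicalRouter(nsxlib_instance.client)  # assumed class; not shown in this excerpt
router_port_client = nsx_resources.LogicalRouterPort(nsxlib_instance.client)
routerlib = router.RouterLib(router_client, router_port_client,
                             nsxlib_instance)
routerlib.add_gw_snat_rule('<nsx-router-uuid>', '203.0.113.10')
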

View File

@ -18,20 +18,18 @@
NSX-V3 Plugin security integration module
"""
import uuid
from neutron_lib import constants
from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
from vmware_nsx._i18n import _, _LW, _LE
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx._i18n import _LE
from vmware_nsx.common import utils
from vmware_nsx.db import nsx_models
from vmware_nsx.extensions import secgroup_rule_local_ip_prefix
from vmware_nsx.extensions import securitygrouplogging as sg_logging
from vmware_nsx.nsxlib.v3 import dfw_api as firewall
from vmware_nsx.nsxlib.v3 import exceptions
LOG = log.getLogger(__name__)
@ -43,402 +41,269 @@ PORT_SG_SCOPE = 'os-security-group'
MAX_NSGROUPS_CRITERIA_TAGS = 10
def _get_l4_protocol_name(protocol_number):
if protocol_number is None:
return
protocol_number = constants.IP_PROTOCOL_MAP.get(protocol_number,
protocol_number)
protocol_number = int(protocol_number)
if protocol_number == 6:
return firewall.TCP
elif protocol_number == 17:
return firewall.UDP
elif protocol_number == 1:
return firewall.ICMPV4
else:
return protocol_number
# XXX this method should be refactored to pull the common stuff out to
# a security_group utils file.
class Security(object):
def _get_direction(sg_rule):
return firewall.IN if sg_rule['direction'] == 'ingress' else firewall.OUT
def _decide_service(sg_rule):
l4_protocol = _get_l4_protocol_name(sg_rule['protocol'])
direction = _get_direction(sg_rule)
if l4_protocol in [firewall.TCP, firewall.UDP]:
# If port_range_min is not specified then we assume all ports are
# matched, relying on neutron to perform validation.
source_ports = []
if sg_rule['port_range_min'] is None:
destination_ports = []
elif sg_rule['port_range_min'] != sg_rule['port_range_max']:
# NSX API requires a non-empty range (e.g. '22-23')
destination_ports = ['%(port_range_min)s-%(port_range_max)s'
% sg_rule]
def _get_l4_protocol_name(self, protocol_number):
if protocol_number is None:
return
protocol_number = constants.IP_PROTOCOL_MAP.get(protocol_number,
protocol_number)
protocol_number = int(protocol_number)
if protocol_number == 6:
return firewall.TCP
elif protocol_number == 17:
return firewall.UDP
elif protocol_number == 1:
return firewall.ICMPV4
else:
destination_ports = ['%(port_range_min)s' % sg_rule]
return protocol_number
if direction == firewall.OUT:
source_ports, destination_ports = destination_ports, []
def _get_direction(self, sg_rule):
return (
firewall.IN if sg_rule['direction'] == 'ingress' else firewall.OUT
)
return firewall.get_nsservice(firewall.L4_PORT_SET_NSSERVICE,
def _decide_service(self, sg_rule):
l4_protocol = self._get_l4_protocol_name(sg_rule['protocol'])
direction = self._get_direction(sg_rule)
if l4_protocol in [firewall.TCP, firewall.UDP]:
# If port_range_min is not specified then we assume all ports are
# matched, relying on neutron to perform validation.
source_ports = []
if sg_rule['port_range_min'] is None:
destination_ports = []
elif sg_rule['port_range_min'] != sg_rule['port_range_max']:
# NSX API requires a non-empty range (e.g. '22-23')
destination_ports = ['%(port_range_min)s-%(port_range_max)s'
% sg_rule]
else:
destination_ports = ['%(port_range_min)s' % sg_rule]
if direction == firewall.OUT:
source_ports, destination_ports = destination_ports, []
return self.get_nsservice(firewall.L4_PORT_SET_NSSERVICE,
l4_protocol=l4_protocol,
source_ports=source_ports,
destination_ports=destination_ports)
elif l4_protocol == firewall.ICMPV4:
return firewall.get_nsservice(firewall.ICMP_TYPE_NSSERVICE,
elif l4_protocol == firewall.ICMPV4:
return self.get_nsservice(firewall.ICMP_TYPE_NSSERVICE,
protocol=l4_protocol,
icmp_type=sg_rule['port_range_min'],
icmp_code=sg_rule['port_range_max'])
elif l4_protocol is not None:
return firewall.get_nsservice(firewall.IP_PROTOCOL_NSSERVICE,
elif l4_protocol is not None:
return self.get_nsservice(firewall.IP_PROTOCOL_NSSERVICE,
protocol_number=l4_protocol)
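
The comments above describe how a security-group rule's protocol and port range are turned into an NSX service definition. A small standalone sketch of just the port-range part, restated outside the class for illustration (the real method additionally builds the NSService payload via get_nsservice):

# Sketch only: mirrors the destination/source port logic above.
def sketch_decide_ports(sg_rule):
    if sg_rule['port_range_min'] is None:
        # no minimum given: match all ports
        destination_ports = []
    elif sg_rule['port_range_min'] != sg_rule['port_range_max']:
        # NSX wants an explicit range string, e.g. '22-23'
        destination_ports = ['%(port_range_min)s-%(port_range_max)s' % sg_rule]
    else:
        destination_ports = ['%(port_range_min)s' % sg_rule]
    source_ports = []
    if sg_rule['direction'] == 'egress':
        # for egress rules the computed ports move to the source side
        source_ports, destination_ports = destination_ports, []
    return source_ports, destination_ports

# sketch_decide_ports({'direction': 'ingress',
#                      'port_range_min': 22, 'port_range_max': 23})
# -> ([], ['22-23'])
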
def _get_fw_rule_from_sg_rule(self, sg_rule, nsgroup_id, rmt_nsgroup_id,
logged, action):
# IPV4 or IPV6
ip_protocol = sg_rule['ethertype'].upper()
direction = self._get_direction(sg_rule)
def _get_fw_rule_from_sg_rule(sg_rule, nsgroup_id, rmt_nsgroup_id,
logged, action):
# IPV4 or IPV6
ip_protocol = sg_rule['ethertype'].upper()
direction = _get_direction(sg_rule)
if sg_rule.get(secgroup_rule_local_ip_prefix.LOCAL_IP_PREFIX):
local_ip_prefix = self.get_ip_cidr_reference(
sg_rule[secgroup_rule_local_ip_prefix.LOCAL_IP_PREFIX],
ip_protocol)
else:
local_ip_prefix = None
if sg_rule.get(secgroup_rule_local_ip_prefix.LOCAL_IP_PREFIX):
local_ip_prefix = firewall.get_ip_cidr_reference(
sg_rule[secgroup_rule_local_ip_prefix.LOCAL_IP_PREFIX],
ip_protocol)
else:
local_ip_prefix = None
source = None
local_group = self.get_nsgroup_reference(nsgroup_id)
if sg_rule['remote_ip_prefix'] is not None:
source = self.get_ip_cidr_reference(
sg_rule['remote_ip_prefix'], ip_protocol)
destination = local_ip_prefix or local_group
else:
if rmt_nsgroup_id:
source = self.get_nsgroup_reference(rmt_nsgroup_id)
destination = local_ip_prefix or local_group
if direction == firewall.OUT:
source, destination = destination, source
source = None
local_group = firewall.get_nsgroup_reference(nsgroup_id)
if sg_rule['remote_ip_prefix'] is not None:
source = firewall.get_ip_cidr_reference(sg_rule['remote_ip_prefix'],
ip_protocol)
destination = local_ip_prefix or local_group
else:
if rmt_nsgroup_id:
source = firewall.get_nsgroup_reference(rmt_nsgroup_id)
destination = local_ip_prefix or local_group
if direction == firewall.OUT:
source, destination = destination, source
service = self._decide_service(sg_rule)
name = sg_rule['id']
service = _decide_service(sg_rule)
name = sg_rule['id']
return firewall.get_firewall_rule_dict(name, source,
return self.get_firewall_rule_dict(name, source,
destination, direction,
ip_protocol, service,
action, logged)
def create_firewall_rules(self, context, section_id, nsgroup_id,
logging_enabled, action, security_group_rules):
def create_firewall_rules(context, section_id, nsgroup_id, logging_enabled,
action, security_group_rules):
# 1. translate rules
# 2. insert in section
# 3. save mappings
# 1. translate rules
# 2. insert in section
# 3. save mappings
firewall_rules = []
for sg_rule in security_group_rules:
remote_nsgroup_id = self._get_remote_nsg_mapping(
context, sg_rule, nsgroup_id)
firewall_rules = []
for sg_rule in security_group_rules:
remote_nsgroup_id = _get_remote_nsg_mapping(
context, sg_rule, nsgroup_id)
fw_rule = self._get_fw_rule_from_sg_rule(
sg_rule, nsgroup_id, remote_nsgroup_id,
logging_enabled, action)
fw_rule = _get_fw_rule_from_sg_rule(
sg_rule, nsgroup_id, remote_nsgroup_id, logging_enabled, action)
firewall_rules.append(fw_rule)
firewall_rules.append(fw_rule)
return self.add_rules_in_section(firewall_rules, section_id)
return firewall.add_rules_in_section(firewall_rules, section_id)
def _process_firewall_section_rules_logging_for_update(self, section_id,
logging_enabled):
rules = self.get_section_rules(section_id).get('results', [])
update_rules = False
for rule in rules:
if rule['logged'] != logging_enabled:
rule['logged'] = logging_enabled
update_rules = True
return rules if update_rules else None
def set_firewall_rule_logging_for_section(self, section_id, logging):
rules = self._process_firewall_section_rules_logging_for_update(
section_id, logging)
self.update_section(section_id, rules=rules)
def _process_firewall_section_rules_logging_for_update(section_id,
logging_enabled):
rules = firewall.get_section_rules(section_id).get('results', [])
update_rules = False
for rule in rules:
if rule['logged'] != logging_enabled:
rule['logged'] = logging_enabled
update_rules = True
return rules if update_rules else None
def update_security_group_on_backend(self, context, security_group):
nsgroup_id, section_id = self.get_sg_mappings(context.session,
security_group['id'])
name = self.get_nsgroup_name(security_group)
description = security_group['description']
logging = (cfg.CONF.nsx_v3.log_security_groups_allowed_traffic or
security_group[sg_logging.LOGGING])
rules = self._process_firewall_section_rules_logging_for_update(
section_id, logging)
self.update_nsgroup(nsgroup_id, name, description)
self.update_section(section_id, name, description, rules=rules)
def get_nsgroup_name(self, security_group):
# NOTE(roeyc): We add the security-group id to the NSGroup name,
# for usability purposes.
return '%(name)s - %(id)s' % security_group
def set_firewall_rule_logging_for_section(section_id, logging):
rules = _process_firewall_section_rules_logging_for_update(section_id,
logging)
firewall.update_section(section_id, rules=rules)
def save_sg_rule_mappings(self, session, firewall_rules):
# REVISIT(roeyc): This method should take care of db access only.
rules = [(rule['display_name'], rule['id']) for rule in firewall_rules]
with session.begin(subtransactions=True):
for neutron_id, nsx_id in rules:
mapping = nsx_models.NeutronNsxRuleMapping(
neutron_id=neutron_id, nsx_id=nsx_id)
session.add(mapping)
return mapping
# XXX db calls should not be here...
def save_sg_mappings(self, session, sg_id, nsgroup_id, section_id):
with session.begin(subtransactions=True):
session.add(
nsx_models.NeutronNsxFirewallSectionMapping(neutron_id=sg_id,
nsx_id=section_id))
session.add(
nsx_models.NeutronNsxSecurityGroupMapping(neutron_id=sg_id,
nsx_id=nsgroup_id))
def update_security_group_on_backend(context, security_group):
nsgroup_id, section_id = get_sg_mappings(context.session,
security_group['id'])
name = get_nsgroup_name(security_group)
description = security_group['description']
logging = (cfg.CONF.nsx_v3.log_security_groups_allowed_traffic or
security_group[sg_logging.LOGGING])
rules = _process_firewall_section_rules_logging_for_update(section_id,
logging)
firewall.update_nsgroup(nsgroup_id, name, description)
firewall.update_section(section_id, name, description, rules=rules)
# XXX db calls should not be here...
def get_sg_rule_mapping(self, session, rule_id):
rule_mapping = session.query(
nsx_models.NeutronNsxRuleMapping).filter_by(
neutron_id=rule_id).one()
return rule_mapping.nsx_id
# XXX db calls should not be here...
def get_sg_mappings(self, session, sg_id):
nsgroup_mapping = session.query(
nsx_models.NeutronNsxSecurityGroupMapping
).filter_by(neutron_id=sg_id).one()
section_mapping = session.query(
nsx_models.NeutronNsxFirewallSectionMapping
).filter_by(neutron_id=sg_id).one()
return nsgroup_mapping.nsx_id, section_mapping.nsx_id
def get_nsgroup_name(security_group):
# NOTE(roeyc): We add the security-group id to the NSGroup name,
# for usability purposes.
return '%(name)s - %(id)s' % security_group
def _get_remote_nsg_mapping(self, context, sg_rule, nsgroup_id):
remote_nsgroup_id = None
remote_group_id = sg_rule.get('remote_group_id')
# skip unnecessary db access when possible
if remote_group_id == sg_rule['security_group_id']:
remote_nsgroup_id = nsgroup_id
elif remote_group_id:
remote_nsgroup_id, s = self.get_sg_mappings(context.session,
remote_group_id)
return remote_nsgroup_id
def get_lport_tags_for_security_groups(self, secgroups):
if len(secgroups) > MAX_NSGROUPS_CRITERIA_TAGS:
raise exceptions.NumberOfNsgroupCriteriaTagsReached(
max_num=MAX_NSGROUPS_CRITERIA_TAGS)
tags = []
for sg in secgroups:
tags = utils.add_v3_tag(tags, PORT_SG_SCOPE, sg)
if not tags:
# This port shouldn't be associated with any security-group
tags = [{'scope': PORT_SG_SCOPE, 'tag': None}]
return tags
def save_sg_rule_mappings(session, firewall_rules):
# REVISIT(roeyc): This method should take care of db access only.
rules = [(rule['display_name'], rule['id']) for rule in firewall_rules]
with session.begin(subtransactions=True):
for neutron_id, nsx_id in rules:
mapping = nsx_models.NeutronNsxRuleMapping(
neutron_id=neutron_id, nsx_id=nsx_id)
session.add(mapping)
return mapping
def update_lport_with_security_groups(self, context, lport_id,
original, updated):
added = set(updated) - set(original)
removed = set(original) - set(updated)
for sg_id in added:
nsgroup_id, s = self.get_sg_mappings(context.session, sg_id)
try:
self.add_nsgroup_members(
nsgroup_id, firewall.LOGICAL_PORT, [lport_id])
except exceptions.NSGroupIsFull:
for sg_id in added:
nsgroup_id, s = self.get_sg_mappings(
context.session, sg_id)
# NOTE(roeyc): If the port was not added to the nsgroup
# yet, then this request will silently fail.
self.remove_nsgroup_member(
nsgroup_id, firewall.LOGICAL_PORT, lport_id)
raise exceptions.SecurityGroupMaximumCapacityReached(
sg_id=sg_id)
except exceptions.ResourceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE("NSGroup %s doesn't exists"), nsgroup_id)
for sg_id in removed:
nsgroup_id, s = self.get_sg_mappings(context.session, sg_id)
self.remove_nsgroup_member(
nsgroup_id, firewall.LOGICAL_PORT, lport_id)
def _init_default_section(self, name, description, nested_groups):
fw_sections = self.list_sections()
for section in fw_sections:
if section['display_name'] == name:
break
else:
tags = utils.build_v3_api_version_tag()
section = self.create_empty_section(
name, description, nested_groups, tags)
def save_sg_mappings(session, sg_id, nsgroup_id, section_id):
with session.begin(subtransactions=True):
session.add(
nsx_models.NeutronNsxFirewallSectionMapping(neutron_id=sg_id,
nsx_id=section_id))
session.add(
nsx_models.NeutronNsxSecurityGroupMapping(neutron_id=sg_id,
nsx_id=nsgroup_id))
def get_sg_rule_mapping(session, rule_id):
rule_mapping = session.query(nsx_models.NeutronNsxRuleMapping).filter_by(
neutron_id=rule_id).one()
return rule_mapping.nsx_id
def get_sg_mappings(session, sg_id):
nsgroup_mapping = session.query(nsx_models.NeutronNsxSecurityGroupMapping
).filter_by(neutron_id=sg_id).one()
section_mapping = session.query(nsx_models.NeutronNsxFirewallSectionMapping
).filter_by(neutron_id=sg_id).one()
return nsgroup_mapping.nsx_id, section_mapping.nsx_id
def _get_remote_nsg_mapping(context, sg_rule, nsgroup_id):
remote_nsgroup_id = None
remote_group_id = sg_rule.get('remote_group_id')
# skip unnecessary db access when possible
if remote_group_id == sg_rule['security_group_id']:
remote_nsgroup_id = nsgroup_id
elif remote_group_id:
remote_nsgroup_id, s = get_sg_mappings(context.session,
remote_group_id)
return remote_nsgroup_id
def get_lport_tags_for_security_groups(secgroups):
if len(secgroups) > MAX_NSGROUPS_CRITERIA_TAGS:
raise nsx_exc.NumberOfNsgroupCriteriaTagsReached(
max_num=MAX_NSGROUPS_CRITERIA_TAGS)
tags = []
for sg in secgroups:
tags = utils.add_v3_tag(tags, PORT_SG_SCOPE, sg)
if not tags:
# This port shouldn't be associated with any security-group
tags = [{'scope': PORT_SG_SCOPE, 'tag': None}]
return tags
def update_lport_with_security_groups(context, lport_id, original, updated):
added = set(updated) - set(original)
removed = set(original) - set(updated)
for sg_id in added:
nsgroup_id, s = get_sg_mappings(context.session, sg_id)
try:
firewall.add_nsgroup_members(
nsgroup_id, firewall.LOGICAL_PORT, [lport_id])
except firewall.NSGroupIsFull:
for sg_id in added:
nsgroup_id, s = get_sg_mappings(context.session, sg_id)
# NOTE(roeyc): If the port was not added to the nsgroup yet,
# then this request will silently fail.
firewall.remove_nsgroup_member(
nsgroup_id, firewall.LOGICAL_PORT, lport_id)
raise nsx_exc.SecurityGroupMaximumCapacityReached(sg_id=sg_id)
except nsx_exc.ResourceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE("NSGroup %s doesn't exists"), nsgroup_id)
for sg_id in removed:
nsgroup_id, s = get_sg_mappings(context.session, sg_id)
firewall.remove_nsgroup_member(
nsgroup_id, firewall.LOGICAL_PORT, lport_id)
def init_nsgroup_manager_and_default_section_rules():
section_description = ("This section is handled by OpenStack to contain "
"default rules on security-groups.")
nsgroup_manager = NSGroupManager(cfg.CONF.nsx_v3.number_of_nested_groups)
section_id = _init_default_section(
DEFAULT_SECTION, section_description,
nsgroup_manager.nested_groups.values())
return nsgroup_manager, section_id
def _init_default_section(name, description, nested_groups):
fw_sections = firewall.list_sections()
for section in fw_sections:
if section['display_name'] == name:
break
else:
tags = utils.build_v3_api_version_tag()
section = firewall.create_empty_section(
name, description, nested_groups, tags)
block_rule = firewall.get_firewall_rule_dict(
'Block All', action=firewall.DROP,
logged=cfg.CONF.nsx_v3.log_security_groups_blocked_traffic)
# TODO(roeyc): Add additional rules to allow IPV6 NDP.
dhcp_client = firewall.get_nsservice(firewall.L4_PORT_SET_NSSERVICE,
block_rule = self.get_firewall_rule_dict(
'Block All', action=firewall.DROP,
logged=cfg.CONF.nsx_v3.log_security_groups_blocked_traffic)
# TODO(roeyc): Add additional rules to allow IPV6 NDP.
dhcp_client = self.get_nsservice(firewall.L4_PORT_SET_NSSERVICE,
l4_protocol=firewall.UDP,
source_ports=[67],
destination_ports=[68])
dhcp_client_rule_in = firewall.get_firewall_rule_dict(
'DHCP Reply', direction=firewall.IN, service=dhcp_client)
dhcp_client_rule_in = self.get_firewall_rule_dict(
'DHCP Reply', direction=firewall.IN, service=dhcp_client)
dhcp_server = (
firewall.get_nsservice(firewall.L4_PORT_SET_NSSERVICE,
dhcp_server = (
self.get_nsservice(firewall.L4_PORT_SET_NSSERVICE,
l4_protocol=firewall.UDP,
source_ports=[68],
destination_ports=[67]))
dhcp_client_rule_out = firewall.get_firewall_rule_dict(
'DHCP Request', direction=firewall.OUT, service=dhcp_server)
dhcp_client_rule_out = self.get_firewall_rule_dict(
'DHCP Request', direction=firewall.OUT, service=dhcp_server)
firewall.update_section(section['id'],
self.update_section(section['id'],
name, section['description'],
applied_tos=nested_groups,
rules=[dhcp_client_rule_out,
dhcp_client_rule_in,
block_rule])
return section['id']
class NSGroupManager(object):
"""
This class assists with NSX integration for Neutron security-groups,
Each Neutron security-group is associated with NSX NSGroup object.
Some specific security policies are the same across all security-groups,
i.e - Default drop rule, DHCP. In order to bind these rules to all
NSGroups (security-groups), we create a nested NSGroup (which its members
are also of type NSGroups) to group the other NSGroups and associate it
with these rules.
In practice, one NSGroup (nested) can't contain all the other NSGroups, as
it has strict size limit. To overcome the limited space challange, we
create several nested groups instead of just one, and we evenly distribute
NSGroups (security-groups) between them.
By using an hashing function on the NSGroup uuid we determine in which
group it should be added, and when deleting an NSGroup (security-group) we
use the same procedure to find which nested group it was added.
"""
NESTED_GROUP_NAME = 'OS Nested Group'
NESTED_GROUP_DESCRIPTION = ('OpenStack NSGroup. Do not delete.')
def __init__(self, size):
self._nested_groups = self._init_nested_groups(size)
self._size = len(self._nested_groups)
@property
def size(self):
return self._size
@property
def nested_groups(self):
return self._nested_groups
def _init_nested_groups(self, requested_size):
# Construct the groups dict -
# {0: <groups-1>,.., n-1: <groups-n>}
size = requested_size
nested_groups = {
self._get_nested_group_index_from_name(nsgroup): nsgroup['id']
for nsgroup in firewall.list_nsgroups()
if utils.is_internal_resource(nsgroup)}
if nested_groups:
size = max(requested_size, max(nested_groups) + 1)
if size > requested_size:
LOG.warning(_LW("Lowering the value of "
"nsx_v3:number_of_nested_groups isn't "
"supported, '%s' nested-groups will be used."),
size)
absent_groups = set(range(size)) - set(nested_groups.keys())
if absent_groups:
LOG.warning(
_LW("Found %(num_present)s Nested Groups, "
"creating %(num_absent)s more."),
{'num_present': len(nested_groups),
'num_absent': len(absent_groups)})
for i in absent_groups:
cont = self._create_nested_group(i)
nested_groups[i] = cont['id']
return nested_groups
def _get_nested_group_index_from_name(self, nested_group):
# The name format is "Nested Group <index+1>"
return int(nested_group['display_name'].split()[-1]) - 1
def _create_nested_group(self, index):
name_prefix = NSGroupManager.NESTED_GROUP_NAME
name = '%s %s' % (name_prefix, index + 1)
description = NSGroupManager.NESTED_GROUP_DESCRIPTION
tags = utils.build_v3_api_version_tag()
return firewall.create_nsgroup(name, description, tags)
def _hash_uuid(self, internal_id):
return hash(uuid.UUID(internal_id))
def _suggest_nested_group(self, internal_id):
# Suggests a nested group to use, can be iterated to find alternative
# group in case that previous suggestions did not help.
index = self._hash_uuid(internal_id) % self.size
yield self.nested_groups[index]
for i in range(1, self.size):
index = (index + 1) % self.size
yield self.nested_groups[index]
def add_nsgroup(self, nsgroup_id):
for group in self._suggest_nested_group(nsgroup_id):
try:
LOG.debug("Adding NSGroup %s to nested group %s",
nsgroup_id, group)
firewall.add_nsgroup_members(group,
firewall.NSGROUP,
[nsgroup_id])
break
except firewall.NSGroupIsFull:
LOG.debug("Nested group %(group_id)s is full, trying the "
"next group..", {'group_id': group})
else:
raise nsx_exc.NsxPluginException(
err_msg=_("Reached the maximum supported amount of "
"security groups."))
def remove_nsgroup(self, nsgroup_id):
for group in self._suggest_nested_group(nsgroup_id):
try:
firewall.remove_nsgroup_member(
group, firewall.NSGROUP, nsgroup_id, verify=True)
break
except firewall.NSGroupMemberNotFound:
LOG.warning(_LW("NSGroup %(nsgroup)s was expected to be found "
"in group %(group_id)s, but wasn't. "
"Looking in the next group.."),
{'nsgroup': nsgroup_id, 'group_id': group})
continue
else:
LOG.warning(_LW("NSGroup %s was marked for removal, but its "
"reference is missing."), nsgroup_id)
return section['id']
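
As the NSGroupManager docstring above notes, nested-group selection is deterministic per NSGroup id: the uuid is hashed to a starting index, and the remaining nested groups are probed in order until one has room. A standalone sketch of that probe order (plain Python, no NSX calls; names are illustrative):

import uuid

def nested_group_probe_order(nsgroup_id, size):
    # Same scheme as NSGroupManager._suggest_nested_group: start at the
    # hashed index, then try each remaining nested group exactly once.
    start = hash(uuid.UUID(nsgroup_id)) % size
    return [(start + i) % size for i in range(size)]

# With four nested groups, every group is eventually tried, e.g.
# nested_group_probe_order('11111111-2222-3333-4444-555555555555', 4)
# could yield [2, 3, 0, 1]; only when the full tour fails does
# add_nsgroup give up and raise NsxPluginException.
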

View File

@ -84,10 +84,10 @@ from vmware_nsx.extensions import maclearning as mac_ext
from vmware_nsx.extensions import providersecuritygroup as provider_sg
from vmware_nsx.extensions import securitygrouplogging as sg_logging
from vmware_nsx.nsxlib import v3 as nsxlib
from vmware_nsx.nsxlib.v3 import client as nsx_client
from vmware_nsx.nsxlib.v3 import cluster as nsx_cluster
from vmware_nsx.nsxlib.v3 import dfw_api as firewall
from vmware_nsx.nsxlib.v3 import exceptions as nsx_lib_exc
from vmware_nsx.nsxlib.v3 import native_dhcp
from vmware_nsx.nsxlib.v3 import ns_group_manager
from vmware_nsx.nsxlib.v3 import resources as nsx_resources
from vmware_nsx.nsxlib.v3 import router
from vmware_nsx.nsxlib.v3 import security
@ -162,11 +162,23 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
def __init__(self):
super(NsxV3Plugin, self).__init__()
LOG.info(_LI("Starting NsxV3Plugin"))
self._nsx_version = nsxlib.get_version()
self.nsxlib = nsxlib.NsxLib(
username=cfg.CONF.nsx_v3.nsx_api_user,
password=cfg.CONF.nsx_v3.nsx_api_password,
retries=cfg.CONF.nsx_v3.http_retries,
insecure=cfg.CONF.nsx_v3.insecure,
ca_file=cfg.CONF.nsx_v3.ca_file,
concurrent_connections=cfg.CONF.nsx_v3.concurrent_connections,
http_timeout=cfg.CONF.nsx_v3.http_timeout,
http_read_timeout=cfg.CONF.nsx_v3.http_read_timeout,
conn_idle_timeout=cfg.CONF.nsx_v3.conn_idle_timeout,
http_provider=None,
max_attempts=cfg.CONF.nsx_v3.retries)
self._nsx_version = self.nsxlib.get_version()
LOG.info(_LI("NSX Version: %s"), self._nsx_version)
self._api_cluster = nsx_cluster.NSXClusteredAPI()
self._nsx_client = nsx_client.NSX3Client(self._api_cluster)
nsx_client._set_default_api_cluster(self._api_cluster)
self._nsx_client = self.nsxlib.client
self.cfg_group = 'nsx_v3' # group name for nsx_v3 section in nsx.ini
self.tier0_groups_dict = {}
@ -180,11 +192,31 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
self._router_port_client = nsx_resources.LogicalRouterPort(
self._nsx_client)
self._routerlib = router.RouterLib(self._router_client,
self._router_port_client)
self._router_port_client,
self.nsxlib)
LOG.debug("Initializing NSX v3 port spoofguard switching profile")
self._switching_profiles = nsx_resources.SwitchingProfile(
self._nsx_client)
# init profiles on nsx backend
(self._psec_profile, self._no_psec_profile_id, self._dhcp_profile,
self._mac_learning_profile) = self._init_nsx_profiles()
# Bind QoS notifications
callbacks_registry.subscribe(qos_utils.handle_qos_notification,
callbacks_resources.QOS_POLICY)
self.start_rpc_listeners_called = False
self._unsubscribe_callback_events()
if cfg.CONF.api_replay_mode:
self.supported_extension_aliases.append('api-replay')
# translate configured transport zone/router names to uuids
self._translate_configured_names_2_uuids()
def _init_nsx_profiles(self):
LOG.debug("Initializing NSX v3 port spoofguard switching profile")
# XXX improve logic to avoid requiring setting this to none.
self._psec_profile = None
self._psec_profile = self._init_port_security_profile()
if not self._psec_profile:
@ -197,24 +229,21 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
self._switching_profiles.find_by_display_name(
NSX_V3_NO_PSEC_PROFILE_NAME)[0])[0]
# Bind QoS notifications
callbacks_registry.subscribe(qos_utils.handle_qos_notification,
callbacks_resources.QOS_POLICY)
self.start_rpc_listeners_called = False
LOG.debug("Initializing NSX v3 DHCP switching profile")
self._dhcp_profile = None
try:
# XXX improve logic to avoid requiring setting this to none.
self._dhcp_profile = None
self._dhcp_profile = self._init_dhcp_switching_profile()
except Exception:
msg = _("Unable to initialize NSX v3 DHCP "
"switching profile: %s") % NSX_V3_DHCP_PROFILE_NAME
raise nsx_exc.NsxPluginException(msg)
self._mac_learning_profile = None
if utils.is_nsx_version_1_1_0(self._nsx_version):
LOG.debug("Initializing NSX v3 Mac Learning switching profile")
try:
# XXX improve logic to avoid requiring setting this to none.
self._mac_learning_profile = None
self._mac_learning_profile = self._init_mac_learning_profile()
# Only expose the extension if it is supported
self.supported_extension_aliases.append('mac-learning')
@ -223,33 +252,28 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
"profile: %(name)s. Reason: %(reason)s"),
{'name': NSX_V3_MAC_LEARNING_PROFILE_NAME,
'reason': e})
self._unsubscribe_callback_events()
if cfg.CONF.api_replay_mode:
self.supported_extension_aliases.append('api-replay')
# translate configured transport zone/router names to uuids
self._translate_configured_names_2_uuids()
return (self._psec_profile, self._no_psec_profile_id,
self._dhcp_profile, self._mac_learning_profile)
def _translate_configured_names_2_uuids(self):
# default VLAN transport zone name / uuid
self._default_vlan_tz_uuid = None
if cfg.CONF.nsx_v3.default_vlan_tz:
tz_id = nsxlib.get_transport_zone_id_by_name_or_id(
tz_id = self.nsxlib.get_transport_zone_id_by_name_or_id(
cfg.CONF.nsx_v3.default_vlan_tz)
self._default_vlan_tz_uuid = tz_id
# default overlay transport zone name / uuid
self._default_overlay_tz_uuid = None
if cfg.CONF.nsx_v3.default_overlay_tz:
tz_id = nsxlib.get_transport_zone_id_by_name_or_id(
tz_id = self.nsxlib.get_transport_zone_id_by_name_or_id(
cfg.CONF.nsx_v3.default_overlay_tz)
self._default_overlay_tz_uuid = tz_id
# default tier0 router
self._default_tier0_router = None
if cfg.CONF.nsx_v3.default_tier0_router:
rtr_id = nsxlib.get_logical_router_id_by_name_or_id(
rtr_id = self.nsxlib.get_logical_router_id_by_name_or_id(
cfg.CONF.nsx_v3.default_tier0_router)
self._default_tier0_router = rtr_id
@ -367,12 +391,12 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
sg_logging.LOGGING])
for sg in [sg for sg in secgroups
if sg[sg_logging.LOGGING] is False]:
_, section_id = security.get_sg_mappings(context.session,
sg['id'])
_, section_id = self.nsxlib.get_sg_mappings(context.session,
sg['id'])
try:
security.set_firewall_rule_logging_for_section(
self.nsxlib.set_firewall_rule_logging_for_section(
section_id, logging=log_all_rules)
except nsx_exc.ManagerError:
except nsx_lib_exc.ManagerError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to update firewall rule logging "
"for rule in section %s"), section_id)
@ -381,7 +405,14 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
def _init_nsgroup_manager_and_default_section_rules(self):
with locking.LockManager.get_lock('nsxv3_nsgroup_manager_init'):
return security.init_nsgroup_manager_and_default_section_rules()
nsgroup_manager = ns_group_manager.NSGroupManager(
cfg.CONF.nsx_v3.number_of_nested_groups)
section_description = ("This section is handled by OpenStack to "
"contain default rules on security-groups.")
section_id = self.nsxlib._init_default_section(
security.DEFAULT_SECTION, section_description,
nsgroup_manager.nested_groups.values())
return nsgroup_manager, section_id
def _init_dhcp_metadata(self):
if cfg.CONF.nsx_v3.native_dhcp_metadata:
@ -403,7 +434,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
cfg.CONF.nsx_v3.dhcp_profile_uuid)
self._dhcp_server = nsx_resources.LogicalDhcpServer(
self._nsx_client)
except nsx_exc.ManagerError:
except nsx_lib_exc.ManagerError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Unable to retrieve DHCP Profile %s, "
"native DHCP service is not supported"),
@ -415,7 +446,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
try:
nsx_resources.MetaDataProxy(self._nsx_client).get(
cfg.CONF.nsx_v3.metadata_proxy_uuid)
except nsx_exc.ManagerError:
except nsx_lib_exc.ManagerError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Unable to retrieve Metadata Proxy %s, "
"native metadata service is not supported"),
@ -579,9 +610,10 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
'tags': tags,
'admin_state': admin_state,
'vlan_id': vlan_id})
nsx_result = nsxlib.create_logical_switch(net_name, physical_net, tags,
admin_state=admin_state,
vlan_id=vlan_id)
nsx_result = self.nsxlib.create_logical_switch(
net_name, physical_net, tags,
admin_state=admin_state,
vlan_id=vlan_id)
return (is_provider_net,
net_type,
@ -696,7 +728,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
LOG.exception(_LE('Failed to create network %s'),
created_net['id'])
if net_type != utils.NetworkTypes.L3_EXT:
nsxlib.delete_logical_switch(created_net['id'])
self.nsxlib.delete_logical_switch(created_net['id'])
# this extra lookup is necessary to get the
# latest db model for the extension functions
@ -778,7 +810,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
# TODO(salv-orlando): Handle backend failure, possibly without
# requiring us to un-delete the DB object. For instance, ignore
# failures occurring if logical switch is not found
nsxlib.delete_logical_switch(nsx_net_id)
self.nsxlib.delete_logical_switch(nsx_net_id)
else:
# TODO(berlin): delete subnets public announce on the network
pass
@ -818,7 +850,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
try:
# get the nsx switch id from the DB mapping
nsx_id = self._get_network_nsx_id(context, id)
nsxlib.update_logical_switch(
self.nsxlib.update_logical_switch(
nsx_id,
name=utils.get_name_and_uuid(net_data['name'] or 'network',
id),
@ -826,7 +858,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
# Backend does not update the admin state of the ports on
# the switch when the switch's admin state changes. Do not
# update the admin state of the ports in neutron either.
except nsx_exc.ManagerError:
except nsx_lib_exc.ManagerError:
LOG.exception(_LE("Unable to update NSX backend, rolling "
"back changes on neutron"))
with excutils.save_and_reraise_exception():
@ -896,7 +928,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
LOG.debug("Created DHCP logical port %(port)s for "
"network %(network)s",
{'port': nsx_port['id'], 'network': network['id']})
except nsx_exc.ManagerError:
except nsx_lib_exc.ManagerError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Unable to create logical DHCP server for "
"network %s"), network['id'])
@ -943,7 +975,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
"%(network)s",
{'server': dhcp_service['nsx_service_id'],
'network': network_id})
except nsx_exc.ManagerError:
except nsx_lib_exc.ManagerError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Unable to delete logical DHCP server %(server)s"
"for network %(network)s"),
@ -1049,7 +1081,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
try:
self._dhcp_server.update(
dhcp_service['nsx_service_id'], **kwargs)
except nsx_exc.ManagerError:
except nsx_lib_exc.ManagerError:
with excutils.save_and_reraise_exception():
LOG.error(
_LE("Unable to update logical DHCP server "
@ -1173,7 +1205,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
def _get_qos_profile_id(self, context, policy_id):
switch_profile_id = nsx_db.get_switch_profile_by_qos_policy(
context.session, policy_id)
qos_profile = nsxlib.get_qos_switching_profile(switch_profile_id)
qos_profile = self.nsxlib.get_qos_switching_profile(switch_profile_id)
if qos_profile:
profile_ids = self._switching_profiles.build_switch_profile_ids(
self._switching_profiles, qos_profile)
@ -1207,7 +1239,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
# If port has no security-groups then we don't need to add any
# security criteria tag.
if port_data[ext_sg.SECURITYGROUPS]:
tags += security.get_lport_tags_for_security_groups(
tags += self.nsxlib.get_lport_tags_for_security_groups(
port_data[ext_sg.SECURITYGROUPS] +
port_data[provider_sg.PROVIDER_SECURITYGROUPS])
@ -1264,7 +1296,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
attachment_type=attachment_type,
parent_vif_id=parent_name, parent_tag=tag,
switch_profile_ids=profiles)
except nsx_exc.ManagerError as inst:
except nsx_lib_exc.ManagerError as inst:
# we may fail if the QoS is not supported for this port
# (for example - transport zone with KVM)
LOG.exception(_LE("Unable to create port on the backend: %s"),
@ -1390,7 +1422,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
{'mac': port['mac_address'], 'ip': ip,
'port': port['id'], 'server': dhcp_service_id})
return binding
except nsx_exc.ManagerError:
except nsx_lib_exc.ManagerError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Unable to create static binding (mac: %(mac)s, "
"ip: %(ip)s) for port %(port)s on logical DHCP "
@ -1422,7 +1454,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
"logical DHCP server %(server)s",
{'port': binding['port_id'],
'server': binding['nsx_service_id']})
except nsx_exc.ManagerError:
except nsx_lib_exc.ManagerError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Unable to delete static binding for port "
"%(port)s) on logical DHCP server %(server)s"),
@ -1474,7 +1506,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
"%(server)s",
{'ip': new_ip,
'server': dhcp_service['nsx_service_id']})
except nsx_exc.ManagerError:
except nsx_lib_exc.ManagerError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Unable to update IP %(ip)s for logical "
"DHCP server %(server)s"),
@ -1539,7 +1571,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
"for port %(port)s on logical DHCP server %(server)s",
{'mac': mac, 'ip': ip, 'port': binding['port_id'],
'server': binding['nsx_service_id']})
except nsx_exc.ManagerError:
except nsx_lib_exc.ManagerError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Unable to update static binding (mac: %(mac)s, "
"ip: %(ip)s) for port %(port)s on logical DHCP "
@ -1607,10 +1639,10 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
if not utils.is_nsx_version_1_1_0(self._nsx_version):
try:
security.update_lport_with_security_groups(
self.nsxlib.update_lport_with_security_groups(
context, lport['id'], [], sgids or [])
except Exception:
with excutils.save_and_reraise_exception():
except Exception as e:
with excutils.save_and_reraise_exception(reraise=False):
LOG.debug("Couldn't associate port %s with "
"one or more security-groups, reverting "
"logical-port creation (%s).",
@ -1618,6 +1650,15 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
self._cleanup_port(
context, neutron_db['id'], lport['id'])
# NOTE(arosen): this is to translate between nsxlib
# exceptions and the plugin exceptions. This should be
# later refactored.
if (e.__class__ is
nsx_lib_exc.SecurityGroupMaximumCapacityReached):
raise nsx_exc.SecurityGroupMaximumCapacityReached(
err_msg=e.msg)
else:
raise e
try:
net_id = port_data[pbin.VIF_DETAILS]['nsx-logical-switch-id']
nsx_db.add_neutron_nsx_port_mapping(
@ -1674,7 +1715,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
context.session, port_id)
self._port_client.delete(nsx_port_id)
if not utils.is_nsx_version_1_1_0(self._nsx_version):
security.update_lport_with_security_groups(
self.nsxlib.update_lport_with_security_groups(
context, nsx_port_id,
port.get(ext_sg.SECURITYGROUPS, []), [])
self.disassociate_floatingips(context, port_id)
@ -1794,11 +1835,11 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
name = self._get_port_name(context, updated_port)
if utils.is_nsx_version_1_1_0(self._nsx_version):
tags_update += security.get_lport_tags_for_security_groups(
tags_update += self.nsxlib.get_lport_tags_for_security_groups(
updated_port.get(ext_sg.SECURITYGROUPS, []) +
updated_port.get(provider_sg.PROVIDER_SECURITYGROUPS, []))
else:
security.update_lport_with_security_groups(
self.nsxlib.update_lport_with_security_groups(
context, lport_id,
original_port.get(ext_sg.SECURITYGROUPS, []) +
original_port.get(provider_sg.PROVIDER_SECURITYGROUPS, []),
@ -1836,7 +1877,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
tags_update=tags_update,
parent_vif_id=parent_vif_id,
parent_tag=tag)
except nsx_exc.ManagerError as inst:
except nsx_lib_exc.ManagerError as inst:
# we may fail if the QoS is not supported for this port
# (for example - transport zone with KVM)
LOG.exception(_LE("Unable to update port on the backend: %s"),
@ -1933,13 +1974,13 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
original_port, updated_port,
address_bindings,
switch_profile_ids)
except (nsx_exc.ManagerError,
nsx_exc.SecurityGroupMaximumCapacityReached):
except (nsx_lib_exc.ManagerError,
nsx_lib_exc.SecurityGroupMaximumCapacityReached) as e:
# In case if there is a failure on NSX-v3 backend, rollback the
# previous update operation on neutron side.
LOG.exception(_LE("Unable to update NSX backend, rolling back "
"changes on neutron"))
with excutils.save_and_reraise_exception():
with excutils.save_and_reraise_exception(reraise=False):
with context.session.begin(subtransactions=True):
super(NsxV3Plugin, self).update_port(
context, id, {'port': original_port})
@ -1960,6 +2001,15 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
self.update_security_group_on_port(
context, id, {'port': original_port},
updated_port, original_port)
# NOTE(arosen): this is to translate between nsxlib
# exceptions and the plugin exceptions. This should be
# later refactored.
if (e.__class__ is
nsx_lib_exc.SecurityGroupMaximumCapacityReached):
raise nsx_exc.SecurityGroupMaximumCapacityReached(
err_msg=e.msg)
else:
raise e
# Update DHCP bindings.
if cfg.CONF.nsx_v3.native_dhcp_metadata:
@ -2164,7 +2214,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
if gw_info != const.ATTR_NOT_SPECIFIED:
try:
self._update_router_gw_info(context, router['id'], gw_info)
except nsx_exc.ManagerError:
except nsx_lib_exc.ManagerError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to set gateway info for router "
"being created: %s - removing router"),
@ -2193,12 +2243,12 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
# passed (and indeed the resource was removed from the Neutron DB
try:
self._router_client.delete(nsx_router_id)
except nsx_exc.ResourceNotFound:
except nsx_lib_exc.ResourceNotFound:
# If the logical router was not found on the backend do not worry
# about it. The condition has already been logged, so there is no
# need to do further logging
pass
except nsx_exc.ManagerError:
except nsx_lib_exc.ManagerError:
# if there is a failure in deleting the router do not fail the
# operation, especially since the router object has already been
# removed from the neutron DB. Take corrective steps to ensure the
@ -2287,14 +2337,14 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
router_name, port['id'], tag='port')
self._port_client.update(nsx_port_id, None, name=name)
return self._update_router_wrapper(context, router_id, router)
except nsx_exc.ResourceNotFound:
except nsx_lib_exc.ResourceNotFound:
with context.session.begin(subtransactions=True):
router_db = self._get_router(context, router_id)
router_db['status'] = const.NET_STATUS_ERROR
raise nsx_exc.NsxPluginException(
err_msg=(_("logical router %s not found at the backend")
% router_id))
except nsx_exc.ManagerError:
except nsx_lib_exc.ManagerError:
with excutils.save_and_reraise_exception():
router_db = self._get_router(context, router_id)
curr_status = router_db['status']
@ -2427,7 +2477,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
# its DHCP port), by creating it if needed.
nsx_rpc.handle_router_metadata_access(self, context, router_id,
interface=info)
except nsx_exc.ManagerError:
except nsx_lib_exc.ManagerError:
with excutils.save_and_reraise_exception():
self.remove_router_interface(
context, router_id, interface_info)
@ -2493,7 +2543,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
subnets=address_groups)
else:
self._router_port_client.delete_by_lswitch_id(nsx_net_id)
except nsx_exc.ResourceNotFound:
except nsx_lib_exc.ResourceNotFound:
LOG.error(_LE("router port on router %(router_id)s for net "
"%(net_id)s not found at the backend"),
{'router_id': router_id,
@ -2537,7 +2587,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
self._routerlib.add_fip_nat_rules(
nsx_router_id, new_fip['floating_ip_address'],
new_fip['fixed_ip_address'])
except nsx_exc.ManagerError:
except nsx_lib_exc.ManagerError:
with excutils.save_and_reraise_exception():
self.delete_floatingip(context, new_fip['id'])
return new_fip
@ -2552,7 +2602,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
self._routerlib.delete_fip_nat_rules(
nsx_router_id, fip['floating_ip_address'],
fip['fixed_ip_address'])
except nsx_exc.ResourceNotFound:
except nsx_lib_exc.ResourceNotFound:
LOG.warning(_LW("Backend NAT rules for fip: %(fip_id)s "
"(ext_ip: %(ext_ip)s int_ip: %(int_ip)s) "
"not found"),
@ -2580,7 +2630,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
self._routerlib.delete_fip_nat_rules(
old_nsx_router_id, old_fip['floating_ip_address'],
old_fip['fixed_ip_address'])
except nsx_exc.ResourceNotFound:
except nsx_lib_exc.ResourceNotFound:
LOG.warning(_LW("Backend NAT rules for fip: %(fip_id)s "
"(ext_ip: %(ext_ip)s int_ip: %(int_ip)s) "
"not found"),
@ -2599,7 +2649,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
self._routerlib.add_fip_nat_rules(
nsx_router_id, new_fip['floating_ip_address'],
new_fip['fixed_ip_address'])
except nsx_exc.ManagerError:
except nsx_lib_exc.ManagerError:
with excutils.save_and_reraise_exception():
super(NsxV3Plugin, self).update_floatingip(
context, fip_id, {'floatingip': {'port_id': old_port_id}})
@ -2623,7 +2673,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
self._routerlib.delete_fip_nat_rules(
nsx_router_id, fip_db.floating_ip_address,
fip_db.fixed_ip_address)
except nsx_exc.ResourceNotFound:
except nsx_lib_exc.ResourceNotFound:
LOG.warning(_LW("Backend NAT rules for fip: %(fip_id)s "
"(ext_ip: %(ext_ip)s int_ip: %(int_ip)s) "
"not found"),
@ -2672,9 +2722,9 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
# security-group rules are located in a dedicated firewall section.
firewall_section = (
firewall.create_empty_section(
nsgroup['display_name'], nsgroup['description'],
[nsgroup['id']], nsgroup['tags'],
self.nsxlib.create_empty_section(
nsgroup.get('display_name'), nsgroup.get('description'),
[nsgroup.get('id')], nsgroup.get('tags'),
operation=operation,
other_section=self.default_section))
return firewall_section
@ -2683,16 +2733,16 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
tags = utils.build_v3_tags_payload(
secgroup, resource_type='os-neutron-secgr-id',
project_name=secgroup['tenant_id'])
name = security.get_nsgroup_name(secgroup)
name = self.nsxlib.get_nsgroup_name(secgroup)
if utils.is_nsx_version_1_1_0(self._nsx_version):
tag_expression = (
firewall.get_nsgroup_port_tag_expression(
self.nsxlib.get_nsgroup_port_tag_expression(
security.PORT_SG_SCOPE, secgroup['id']))
else:
tag_expression = None
ns_group = firewall.create_nsgroup(
ns_group = self.nsxlib.create_nsgroup(
name, secgroup['description'], tags, tag_expression)
# security-group rules are located in a dedicated firewall section.
firewall_section = self._create_fw_section_for_secgroup(
@ -2726,21 +2776,21 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
super(NsxV3Plugin, self).create_security_group(
context, security_group, default_sg))
security.save_sg_mappings(context.session,
secgroup_db['id'],
ns_group['id'],
firewall_section['id'])
self.nsxlib.save_sg_mappings(context.session,
secgroup_db['id'],
ns_group['id'],
firewall_section['id'])
self._process_security_group_properties_create(context,
secgroup_db,
secgroup,
default_sg)
except nsx_exc.ManagerError:
except nsx_lib_exc.ManagerError:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Unable to create security-group on the "
"backend."))
if ns_group:
firewall.delete_nsgroup(ns_group['id'])
self.nsxlib.delete_nsgroup(ns_group['id'])
except Exception:
with excutils.save_and_reraise_exception():
section_id = firewall_section.get('id')
@ -2750,9 +2800,9 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
"section %s, ns-group %s.",
section_id, nsgroup_id)
if nsgroup_id:
firewall.delete_nsgroup(nsgroup_id)
self.nsxlib.delete_nsgroup(nsgroup_id)
if section_id:
firewall.delete_section(section_id)
self.nsxlib.delete_section(section_id)
try:
sg_rules = secgroup_db['security_group_rules']
# skip if there are no rules in the group, i.e. the provider case
@ -2763,13 +2813,13 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
action = (firewall.DROP
if secgroup.get(provider_sg.PROVIDER)
else firewall.ALLOW)
rules = security.create_firewall_rules(
rules = self.nsxlib.create_firewall_rules(
context, firewall_section['id'], ns_group['id'],
logging, action, sg_rules)
security.save_sg_rule_mappings(context.session,
rules['rules'])
self.nsxlib.save_sg_rule_mappings(context.session,
rules['rules'])
self.nsgroup_manager.add_nsgroup(ns_group['id'])
except nsx_exc.ManagerError:
except nsx_lib_exc.ManagerError:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to create backend firewall rules "
"for security-group %(name)s (%(id)s), "
@ -2779,8 +2829,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
context = context.elevated()
super(NsxV3Plugin, self).delete_security_group(
context, secgroup_db['id'])
firewall.delete_nsgroup(ns_group['id'])
firewall.delete_section(firewall_section['id'])
self.nsxlib.delete_nsgroup(ns_group['id'])
self.nsxlib.delete_section(firewall_section['id'])
return secgroup_db
@ -2794,8 +2844,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
self._process_security_group_properties_update(
context, secgroup_res, security_group['security_group'])
try:
security.update_security_group_on_backend(context, secgroup_res)
except nsx_exc.ManagerError:
self.nsxlib.update_security_group_on_backend(context, secgroup_res)
except nsx_lib_exc.ManagerError:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to update security-group %(name)s "
"(%(id)s), rolling back changes in "
@ -2806,10 +2856,11 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
return secgroup_res
def delete_security_group(self, context, id):
nsgroup_id, section_id = security.get_sg_mappings(context.session, id)
nsgroup_id, section_id = self.nsxlib.get_sg_mappings(
context.session, id)
super(NsxV3Plugin, self).delete_security_group(context, id)
firewall.delete_section(section_id)
firewall.delete_nsgroup(nsgroup_id)
self.nsxlib.delete_section(section_id)
self.nsxlib.delete_nsgroup(nsgroup_id)
self.nsgroup_manager.remove_nsgroup(nsgroup_id)
def create_security_group_rule(self, context, security_group_rule):
@ -2849,26 +2900,26 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
action = firewall.DROP
sg_id = rules_db[0]['security_group_id']
nsgroup_id, section_id = security.get_sg_mappings(context.session,
sg_id)
nsgroup_id, section_id = self.nsxlib.get_sg_mappings(context.session,
sg_id)
logging_enabled = (cfg.CONF.nsx_v3.log_security_groups_allowed_traffic
or self._is_security_group_logged(context, sg_id))
try:
rules = security.create_firewall_rules(
rules = self.nsxlib.create_firewall_rules(
context, section_id, nsgroup_id,
logging_enabled, action, rules_db)
except nsx_exc.ManagerError:
except nsx_lib_exc.ManagerError:
with excutils.save_and_reraise_exception():
for rule in rules_db:
super(NsxV3Plugin, self).delete_security_group_rule(
context, rule['id'])
security.save_sg_rule_mappings(context.session, rules['rules'])
self.nsxlib.save_sg_rule_mappings(context.session, rules['rules'])
return rules_db
def delete_security_group_rule(self, context, id):
rule_db = self._get_security_group_rule(context, id)
sg_id = rule_db['security_group_id']
_, section_id = security.get_sg_mappings(context.session, sg_id)
fw_rule_id = security.get_sg_rule_mapping(context.session, id)
firewall.delete_rule(section_id, fw_rule_id)
_, section_id = self.nsxlib.get_sg_mappings(context.session, sg_id)
fw_rule_id = self.nsxlib.get_sg_rule_mapping(context.session, id)
self.nsxlib.delete_rule(section_id, fw_rule_id)
super(NsxV3Plugin, self).delete_security_group_rule(context, id)
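
The port-create and port-update paths above re-raise nsxlib exceptions as plugin-level exceptions by checking the exception class. A minimal standalone sketch of that translation pattern, using stand-in exception classes rather than the real nsx_lib_exc/nsx_exc modules:

# Stand-ins for illustration only; the real classes live in
# vmware_nsx.nsxlib.v3.exceptions and vmware_nsx.common.exceptions.
class LibSecurityGroupMaximumCapacityReached(Exception):
    def __init__(self, msg):
        super(LibSecurityGroupMaximumCapacityReached, self).__init__(msg)
        self.msg = msg

class PluginSecurityGroupMaximumCapacityReached(Exception):
    pass

def reraise_as_plugin_exception(e):
    # Mirrors the NOTE(arosen) blocks above: wrap the known library error
    # in the plugin error, otherwise propagate the original unchanged.
    if e.__class__ is LibSecurityGroupMaximumCapacityReached:
        raise PluginSecurityGroupMaximumCapacityReached(e.msg)
    raise e
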

View File

@ -34,11 +34,10 @@ from neutron_lib import constants
from neutron_lib import exceptions as n_exc
from vmware_nsx._i18n import _, _LE, _LI
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.common import nsx_constants
from vmware_nsx.common import utils as nsx_utils
from vmware_nsx.db import db as nsx_db
from vmware_nsx.nsxlib import v3 as nsxlib
from vmware_nsx.nsxlib.v3 import exceptions as nsxlib_exc
LOG = logging.getLogger(__name__)
@ -83,8 +82,9 @@ class NsxV3Driver(l2gateway_db.L2GatewayMixin):
return
admin_ctx = context.get_admin_context()
def_l2gw_uuid = nsxlib.get_bridge_cluster_id_by_name_or_id(
def_l2gw_name)
def_l2gw_uuid = (
self._core_plugin.nsxlib.get_bridge_cluster_id_by_name_or_id(
def_l2gw_name))
# Optimistically create the default L2 gateway in neutron DB
device = {'device_name': def_l2gw_uuid,
@ -221,11 +221,11 @@ class NsxV3Driver(l2gateway_db.L2GatewayMixin):
tags = nsx_utils.build_v3_tags_payload(
gw_connection, resource_type='os-neutron-l2gw-id',
project_name=context.tenant_name)
bridge_endpoint = nsxlib.create_bridge_endpoint(
bridge_endpoint = self._core_plugin.nsxlib.create_bridge_endpoint(
device_name=device_name,
seg_id=seg_id,
tags=tags)
except nsx_exc.ManagerError as e:
except nsxlib_exc.ManagerError as e:
LOG.exception(_LE("Unable to create bridge endpoint, rolling back "
"changes on neutron. Exception is %s"), e)
raise l2gw_exc.L2GatewayServiceDriverError(
@ -252,11 +252,12 @@ class NsxV3Driver(l2gateway_db.L2GatewayMixin):
fixed_ip['subnet_id'],
fixed_ip['ip_address'])
LOG.debug("IP addresses deallocated on port %s", port['id'])
except (nsx_exc.ManagerError,
except (nsxlib_exc.ManagerError,
n_exc.NeutronException):
LOG.exception(_LE("Unable to create L2 gateway port, "
"rolling back changes on neutron"))
nsxlib.delete_bridge_endpoint(bridge_endpoint['id'])
self._core_plugin.nsxlib.delete_bridge_endpoint(
bridge_endpoint['id'])
raise l2gw_exc.L2GatewayServiceDriverError(
method='create_l2_gateway_connection_postcommit')
try:
@ -270,7 +271,8 @@ class NsxV3Driver(l2gateway_db.L2GatewayMixin):
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Unable to add L2 gateway connection "
"mappings, rolling back changes on neutron"))
nsxlib.delete_bridge_endpoint(bridge_endpoint['id'])
self._core_plugin.nsxlib.delete_bridge_endpoint(
bridge_endpoint['id'])
super(NsxV3Driver,
self).delete_l2_gateway_connection(
context,
@ -294,8 +296,8 @@ class NsxV3Driver(l2gateway_db.L2GatewayMixin):
port_id=conn_mapping.get('port_id'),
l2gw_port_check=False)
try:
nsxlib.delete_bridge_endpoint(bridge_endpoint_id)
except nsx_exc.ManagerError as e:
self._core_plugin.nsxlib.delete_bridge_endpoint(bridge_endpoint_id)
except nsxlib_exc.ManagerError as e:
LOG.exception(_LE("Unable to delete bridge endpoint %(id)s on the "
"backend due to exc: %(exc)s"),
{'id': bridge_endpoint_id, 'exc': e})

View File

@ -30,6 +30,7 @@ from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.common import utils as nsx_utils
from vmware_nsx.db import db as nsx_db
from vmware_nsx.nsxlib import v3 as nsxlib
from vmware_nsx.nsxlib.v3 import exceptions as nsxlib_exc
from vmware_nsx.nsxlib.v3 import resources as nsx_resources
LOG = logging.getLogger(__name__)
@ -219,7 +220,7 @@ class NsxV3Driver(base_driver.TaasBaseDriver,
direction=direction,
destinations=destinations,
tags=tags))
except nsx_exc.ManagerError:
except nsxlib_exc.ManagerError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Unable to create port mirror switch profile "
"for tap flow %s on NSX backend, rolling back "
@ -245,7 +246,7 @@ class NsxV3Driver(base_driver.TaasBaseDriver,
self._update_port_at_backend(context=context, port_id=src_port_id,
switching_profile=port_mirror_profile,
delete_profile=False)
except nsx_exc.ManagerError:
except nsxlib_exc.ManagerError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Unable to update source port %(port)s with "
"switching profile %(profile) for tap flow "
@ -272,14 +273,14 @@ class NsxV3Driver(base_driver.TaasBaseDriver,
context._plugin_context.session, dest_port_id)
# Create port mirror session on the backend
try:
pm_session = nsxlib.create_port_mirror_session(
pm_session = nsxlib.NsxLib().create_port_mirror_session(
source_ports=nsx_src_ports,
dest_ports=nsx_dest_ports,
direction=direction,
description=tf.get('description'),
name=tf.get('name'),
tags=tags)
except nsx_exc.ManagerError:
except nsxlib_exc.ManagerError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Unable to create port mirror session %s "
"on NSX backend, rolling back "
@ -298,7 +299,7 @@ class NsxV3Driver(base_driver.TaasBaseDriver,
LOG.error(_LE("Unable to create port mirror session db "
"mappings for tap flow %s. Rolling back "
"changes in Neutron."), tf['id'])
nsxlib.delete_port_mirror_session(pm_session['id'])
nsxlib.NsxLib().delete_port_mirror_session(pm_session['id'])
def delete_tap_flow_precommit(self, context):
pass
@ -343,7 +344,7 @@ class NsxV3Driver(base_driver.TaasBaseDriver,
self._update_port_at_backend(context=context, port_id=src_port_id,
switching_profile=port_mirror_profile,
delete_profile=True)
except nsx_exc.ManagerError:
except nsxlib_exc.ManagerError:
LOG.error(_LE("Unable to update source port %(port)s "
"to delete port mirror profile %(pm)s on NSX "
"backend."),
@ -352,7 +353,7 @@ class NsxV3Driver(base_driver.TaasBaseDriver,
try:
# Delete port mirroring switching profile
self._nsx_plugin._switching_profiles.delete(uuid=pm_profile_id)
except nsx_exc.ManagerError:
except nsxlib_exc.ManagerError:
LOG.error(_LE("Unable to delete port mirror switching profile "
"%s on NSX backend."), pm_profile_id)
@ -360,7 +361,7 @@ class NsxV3Driver(base_driver.TaasBaseDriver,
# Delete port mirroring session on the backend
try:
nsxlib.delete_port_mirror_session(pm_session_id)
except nsx_exc.ManagerError:
except nsxlib_exc.ManagerError:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Unable to delete port mirror session %s "
"on NSX backend."), pm_session_id)

View File

@ -16,6 +16,7 @@
from neutron.api.rpc.callbacks import events as callbacks_events
from neutron import context as n_context
from neutron import manager
from neutron.objects.qos import policy as qos_policy
from neutron.services.qos import qos_consts
from neutron_lib.api import validators
@ -26,7 +27,6 @@ from vmware_nsx._i18n import _, _LW
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.common import utils
from vmware_nsx.db import db as nsx_db
from vmware_nsx.nsxlib import v3 as nsxlib
LOG = logging.getLogger(__name__)
MAX_KBPS_MIN_VALUE = 1024
@ -75,6 +75,10 @@ class QosNotificationsHandler(object):
def __init__(self):
super(QosNotificationsHandler, self).__init__()
@property
def _core_plugin(self):
return manager.NeutronManager.get_plugin()
def _get_tags(self, context, policy):
policy_dict = {'id': policy.id, 'tenant_id': policy.tenant_id}
return utils.build_v3_tags_payload(
@ -84,7 +88,7 @@ class QosNotificationsHandler(object):
def create_policy(self, context, policy):
policy_id = policy.id
tags = self._get_tags(context, policy)
result = nsxlib.create_qos_switching_profile(
result = self._core_plugin.nsxlib.create_qos_switching_profile(
tags=tags, name=policy.name,
description=policy.description)
if not result or not validators.is_attr_set(result.get('id')):
@ -100,13 +104,13 @@ class QosNotificationsHandler(object):
def delete_policy(self, context, policy_id):
profile_id = nsx_db.get_switch_profile_by_qos_policy(
context.session, policy_id)
nsxlib.delete_qos_switching_profile(profile_id)
self._core_plugin.nsxlib.delete_qos_switching_profile(profile_id)
def update_policy(self, context, policy_id, policy):
profile_id = nsx_db.get_switch_profile_by_qos_policy(
context.session, policy_id)
tags = self._get_tags(context, policy)
nsxlib.update_qos_switching_profile(
self._core_plugin.nsxlib.update_qos_switching_profile(
profile_id,
tags=tags,
name=policy.name,
@ -176,8 +180,7 @@ class QosNotificationsHandler(object):
average_bw) = self._get_bw_values_from_rule(bw_rule)
qos_marking, dscp = self._get_dscp_values_from_rule(dscp_rule)
nsxlib.update_qos_switching_profile_shaping(
self._core_plugin.nsxlib.update_qos_switching_profile_shaping(
profile_id,
shaping_enabled=shaping_enabled,
burst_size=burst_size,
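
With the handler now resolving nsxlib through the core plugin, a unit test can stub the _core_plugin property instead of patching a module-level nsxlib. A minimal sketch, assuming only the handler class above (the PropertyMock target and fake ids are illustrative):

    import contextlib

    import mock

    @contextlib.contextmanager
    def demo_stub_core_plugin(handler, fake_profile_id):
        # hand the handler a fake plugin whose nsxlib returns a canned
        # switching profile instead of talking to the NSX manager
        fake_plugin = mock.Mock()
        fake_plugin.nsxlib.create_qos_switching_profile.return_value = {
            'id': fake_profile_id}
        with mock.patch.object(type(handler), '_core_plugin',
                               new_callable=mock.PropertyMock,
                               return_value=fake_plugin):
            yield fake_plugin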

View File

@ -21,9 +21,6 @@ from oslo_config import cfg
from vmware_nsx._i18n import _LI
from vmware_nsx.common import nsx_constants
from vmware_nsx.common import utils as comm_utils
from vmware_nsx.nsxlib import v3 as nsxlib
from vmware_nsx.nsxlib.v3 import client
from vmware_nsx.nsxlib.v3 import cluster
from vmware_nsx.nsxlib.v3 import native_dhcp
from vmware_nsx.nsxlib.v3 import resources
from vmware_nsx.shell.admin.plugins.common import constants
@ -51,15 +48,13 @@ def list_dhcp_bindings(resource, event, trigger, **kwargs):
def nsx_update_dhcp_bindings(resource, event, trigger, **kwargs):
"""Resync DHCP bindings for NSXv3 CrossHairs."""
nsx_version = nsxlib.get_version()
nsx_version = utils.get_connected_nsxlib().get_version()
if not comm_utils.is_nsx_version_1_1_0(nsx_version):
LOG.info(_LI("This utility is not available for NSX version %s"),
nsx_version)
return
cluster_api = cluster.NSXClusteredAPI()
nsx_client = client.NSX3Client(cluster_api)
client._set_default_api_cluster(cluster_api)
nsx_client = utils.get_nsxv3_client()
port_resource = resources.LogicalPort(nsx_client)
dhcp_server_resource = resources.LogicalDhcpServer(nsx_client)

View File

@ -20,8 +20,6 @@ from oslo_config import cfg
from vmware_nsx._i18n import _LI, _LE
from vmware_nsx.common import nsx_constants
from vmware_nsx.common import utils as nsx_utils
from vmware_nsx.nsxlib.v3 import client
from vmware_nsx.nsxlib.v3 import cluster
from vmware_nsx.nsxlib.v3 import resources
from vmware_nsx.shell.admin.plugins.common import constants
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
@ -36,9 +34,7 @@ neutron_client = utils.NeutronDbClient()
def nsx_update_metadata_proxy(resource, event, trigger, **kwargs):
"""Update Metadata proxy for NSXv3 CrossHairs."""
cluster_api = cluster.NSXClusteredAPI()
nsx_client = client.NSX3Client(cluster_api)
client._set_default_api_cluster(cluster_api)
nsx_client = utils.get_nsxv3_client()
port_resource = resources.LogicalPort(nsx_client)
for network in neutron_client.get_networks():

View File

@ -18,7 +18,6 @@ import logging
from vmware_nsx._i18n import _LI
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.db import db as nsx_db
from vmware_nsx.nsxlib import v3 as nsxlib
from vmware_nsx.shell.admin.plugins.common import constants
from vmware_nsx.shell.admin.plugins.common import formatters
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
@ -55,7 +54,7 @@ def list_missing_networks(resource, event, trigger, **kwargs):
pass
else:
try:
nsxlib.get_logical_switch(nsx_id)
admin_utils.get_connected_nsxlib().get_logical_switch(nsx_id)
except nsx_exc.ResourceNotFound:
networks.append({'name': net['name'],
'neutron_id': neutron_id,

View File

@ -21,14 +21,13 @@ from vmware_nsx._i18n import _LI, _LW
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.db import db as nsx_db
from vmware_nsx.db import nsx_models
from vmware_nsx.nsxlib.v3 import client
from vmware_nsx.nsxlib.v3 import cluster
from vmware_nsx.nsxlib.v3 import resources
from vmware_nsx.plugins.nsx_v3 import plugin
from vmware_nsx.services.qos.common import utils as qos_utils
from vmware_nsx.shell.admin.plugins.common import constants
from vmware_nsx.shell.admin.plugins.common import formatters
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils as v3_utils
from vmware_nsx.shell import resources as shell
from neutron.callbacks import registry
@ -60,8 +59,7 @@ def get_port_nsx_id(session, neutron_id):
def get_port_and_profile_clients():
_api_cluster = cluster.NSXClusteredAPI()
_nsx_client = client.NSX3Client(_api_cluster)
_nsx_client = v3_utils.get_nsxv3_client()
return (resources.LogicalPort(_nsx_client),
resources.SwitchingProfile(_nsx_client))

View File

@ -18,12 +18,11 @@ import logging
from vmware_nsx._i18n import _LI
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.db import db as nsx_db
from vmware_nsx.nsxlib.v3 import client as nsx_client
from vmware_nsx.nsxlib.v3 import cluster as nsx_cluster
from vmware_nsx.nsxlib.v3 import resources as nsx_resources
from vmware_nsx.shell.admin.plugins.common import constants
from vmware_nsx.shell.admin.plugins.common import formatters
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils
from vmware_nsx.shell import resources as shell
from neutron.callbacks import registry
@ -40,8 +39,7 @@ class RoutersPlugin(db_base_plugin_v2.NeutronDbPluginV2,
def get_router_client():
_api_cluster = nsx_cluster.NSXClusteredAPI()
_nsx_client = nsx_client.NSX3Client(_api_cluster)
_nsx_client = utils.get_nsxv3_client()
return nsx_resources.LogicalRouter(_nsx_client)

View File

@ -31,7 +31,6 @@ from vmware_nsx.shell.admin.plugins.nsxv3.resources import ports
from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils as v3_utils
from vmware_nsx.shell import resources as shell
from vmware_nsx._i18n import _LE, _LW
from vmware_nsx.nsxlib import v3 as nsxlib
from vmware_nsx.nsxlib.v3 import dfw_api as firewall
from vmware_nsx.nsxlib.v3 import security
@ -107,6 +106,7 @@ class NeutronSecurityGroupApi(securitygroups_db.SecurityGroupDbMixin,
neutron_sg = NeutronSecurityGroupApi()
neutron_db = v3_utils.NeutronDbClient()
nsxlib = v3_utils.get_connected_nsxlib()
def _log_info(resource, data, attrs=['display_name', 'id']):
@ -126,7 +126,7 @@ def list_security_groups_mappings(resource, event, trigger, **kwargs):
@admin_utils.list_handler(constants.FIREWALL_SECTIONS)
@admin_utils.output_header
def nsx_list_dfw_sections(resource, event, trigger, **kwargs):
fw_sections = firewall.list_sections()
fw_sections = nsxlib.list_sections()
_log_info(constants.FIREWALL_SECTIONS, fw_sections)
return bool(fw_sections)
@ -134,13 +134,13 @@ def nsx_list_dfw_sections(resource, event, trigger, **kwargs):
@admin_utils.list_handler(constants.FIREWALL_NSX_GROUPS)
@admin_utils.output_header
def nsx_list_security_groups(resource, event, trigger, **kwargs):
nsx_secgroups = firewall.list_nsgroups()
nsx_secgroups = nsxlib.list_nsgroups()
_log_info(constants.FIREWALL_NSX_GROUPS, nsx_secgroups)
return bool(nsx_secgroups)
def _find_missing_security_groups():
nsx_secgroups = firewall.list_nsgroups()
nsx_secgroups = nsxlib.list_nsgroups()
sg_mappings = neutron_sg.get_security_groups_mappings()
missing_secgroups = {}
for sg_db in sg_mappings:
@ -169,7 +169,7 @@ def list_missing_security_groups(resource, event, trigger, **kwargs):
def _find_missing_sections():
fw_sections = firewall.list_sections()
fw_sections = nsxlib.list_sections()
sg_mappings = neutron_sg.get_security_groups_mappings()
missing_sections = {}
for sg_db in sg_mappings:
@ -204,13 +204,13 @@ def fix_security_groups(resource, event, trigger, **kwargs):
for sg_id, sg in inconsistent_secgroups.items():
secgroup = plugin.get_security_group(context_, sg_id)
firewall.delete_section(sg['section-id'])
firewall.delete_nsgroup(sg['nsx-securitygroup-id'])
nsxlib.delete_section(sg['section-id'])
nsxlib.delete_nsgroup(sg['nsx-securitygroup-id'])
neutron_sg.delete_security_group_section_mapping(sg_id)
neutron_sg.delete_security_group_backend_mapping(sg_id)
nsgroup, fw_section = (
plugin._create_security_group_backend_resources(secgroup))
security.save_sg_mappings(
nsxlib.save_sg_mappings(
context_.session, sg_id, nsgroup['id'], fw_section['id'])
# If version > 1.1 then we use dynamic criteria tags, and the port
# should already have them.
@ -219,7 +219,7 @@ def fix_security_groups(resource, event, trigger, **kwargs):
for port_id in neutron_db.get_ports_in_security_group(sg_id):
lport_id = neutron_db.get_logical_port_id(port_id)
members.append(lport_id)
firewall.add_nsgroup_members(
nsxlib.add_nsgroup_members(
nsgroup['id'], firewall.LOGICAL_PORT, members)
for rule in secgroup['security_group_rules']:
@ -231,11 +231,11 @@ def fix_security_groups(resource, event, trigger, **kwargs):
action = (firewall.DROP
if secgroup.get(provider_sg.PROVIDER)
else firewall.ALLOW)
rules = security.create_firewall_rules(
rules = nsxlib.create_firewall_rules(
context_, fw_section['id'], nsgroup['id'],
secgroup.get(sg_logging.LOGGING, False), action,
secgroup['security_group_rules'])
security.save_sg_rule_mappings(context_.session, rules['rules'])
nsxlib.save_sg_rule_mappings(context_.session, rules['rules'])
# Add nsgroup to a nested group
plugin.nsgroup_manager.add_nsgroup(nsgroup['id'])
@ -250,7 +250,7 @@ def _update_ports_dynamic_criteria_tags():
_, lport_id = neutron_db.get_lswitch_and_lport_id(port['id'])
lport = port_client.get(lport_id)
criteria_tags = security.get_lport_tags_for_security_groups(secgroups)
criteria_tags = nsxlib.get_lport_tags_for_security_groups(secgroups)
lport['tags'] = utils.update_v3_tags(
lport.get('tags', []), criteria_tags)
port_client._client.update(lport_id, body=lport)
@ -260,14 +260,14 @@ def _update_security_group_dynamic_criteria():
secgroups = neutron_sg.get_security_groups()
for sg in secgroups:
nsgroup_id = neutron_sg.get_nsgroup_id(sg['id'])
membership_criteria = firewall.get_nsgroup_port_tag_expression(
membership_criteria = nsxlib.get_nsgroup_port_tag_expression(
security.PORT_SG_SCOPE, sg['id'])
try:
# We want to add the dynamic criteria and remove all direct members;
# they will be added by the manager using the new criteria.
firewall.update_nsgroup(nsgroup_id,
membership_criteria=membership_criteria,
members=[])
nsxlib.update_nsgroup(nsgroup_id,
membership_criteria=membership_criteria,
members=[])
except Exception as e:
LOG.warning(_LW("Failed to update membership criteria for nsgroup "
"%(nsgroup_id)s, request to backend returned "

View File

@ -12,13 +12,35 @@
# License for the specific language governing permissions and limitations
# under the License.
from neutron import context
from neutron.db import db_base_plugin_v2
from oslo_config import cfg
from vmware_nsx.db import db as nsx_db
from vmware_nsx.nsxlib import v3
from vmware_nsx.plugins.nsx_v3 import plugin
def get_nsxv3_client():
return get_connected_nsxlib().client
def get_connected_nsxlib():
return v3.NsxLib(
username=cfg.CONF.nsx_v3.nsx_api_user,
password=cfg.CONF.nsx_v3.nsx_api_password,
retries=cfg.CONF.nsx_v3.http_retries,
insecure=cfg.CONF.nsx_v3.insecure,
ca_file=cfg.CONF.nsx_v3.ca_file,
concurrent_connections=cfg.CONF.nsx_v3.concurrent_connections,
http_timeout=cfg.CONF.nsx_v3.http_timeout,
http_read_timeout=cfg.CONF.nsx_v3.http_read_timeout,
conn_idle_timeout=cfg.CONF.nsx_v3.conn_idle_timeout,
http_provider=None,
max_attempts=cfg.CONF.nsx_v3.retries)
class NeutronDbClient(db_base_plugin_v2.NeutronDbPluginV2):
def __init__(self):
super(NeutronDbClient, self).__init__()
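
These two helpers give the admin shell one place that knows how to build a connected NsxLib from the nsx_v3 config section. A short usage sketch, assuming the options are already loaded (it mirrors how the dhcp_binding and metadata utilities above consume the helpers):

    from vmware_nsx.nsxlib.v3 import resources
    from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils

    nsxlib = utils.get_connected_nsxlib()
    nsx_version = nsxlib.get_version()  # round-trip to the NSX manager
    port_resource = resources.LogicalPort(utils.get_nsxv3_client())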

View File

@ -244,8 +244,8 @@ class ProviderSecurityGroupExtTestCase(
self.assertEqual([sg_id], port['port']['security_groups'])
class TestNSXv3ProviderSecurityGrp(test_nsxv3_plugin.NsxV3PluginTestCaseMixin,
ProviderSecurityGroupExtTestCase):
class TestNSXv3ProviderSecurityGrp(ProviderSecurityGroupExtTestCase,
test_nsxv3_plugin.NsxV3PluginTestCaseMixin):
pass

View File

@ -27,7 +27,6 @@ from neutron_lib import constants as const
from vmware_nsx.db import extended_security_group_rule as ext_rule_db
from vmware_nsx.extensions import secgroup_rule_local_ip_prefix as ext_loip
from vmware_nsx.nsxlib.v3 import dfw_api as v3_fw
from vmware_nsx.plugins.nsx_v.vshield import securitygroup_utils
from vmware_nsx.tests.unit.nsx_v import test_plugin as test_nsxv_plugin
from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_nsxv3_plugin
@ -124,15 +123,30 @@ class TestNsxVExtendedSGRule(test_nsxv_plugin.NsxVSecurityGroupsTestCase,
class TestNSXv3ExtendedSGRule(test_nsxv3_plugin.NsxV3PluginTestCaseMixin,
LocalIPPrefixExtTestCase):
def test_create_rule_with_local_ip_prefix(self):
local_ip_prefix = '239.255.0.0/16'
dest = v3_fw.get_ip_cidr_reference(local_ip_prefix, v3_fw.IPV4)
sg_rules = [
{'tenant_id': mock.ANY,
'id': mock.ANY,
'port_range_min': None,
'local_ip_prefix': '239.255.0.0/16',
'ethertype': 'IPv4',
'protocol': u'udp', 'remote_ip_prefix': '10.0.0.0/24',
'port_range_max': None,
'security_group_id': mock.ANY,
'remote_group_id': None, 'direction': u'ingress',
'description': ''}]
with mock.patch.object(v3_fw, 'get_firewall_rule_dict',
side_effect=v3_fw.get_firewall_rule_dict) as mock_rule:
with mock.patch(
"vmware_nsx.nsxlib.v3.NsxLib.create_firewall_rules",
side_effect=test_nsxv3_plugin._mock_create_firewall_rules,
) as mock_rule:
super(TestNSXv3ExtendedSGRule,
self).test_create_rule_with_local_ip_prefix()
mock_rule.assert_called_with(mock.ANY, mock.ANY, dest, mock.ANY,
v3_fw.IPV4, mock.ANY, v3_fw.ALLOW,
mock.ANY)
mock_rule.assert_called_with(
mock.ANY, # context
mock.ANY, # firewall_section_id
mock.ANY, # ns_group_id
False, # logging
'ALLOW', # action
sg_rules) # sg_rules

View File

@ -18,7 +18,8 @@ from neutron.extensions import securitygroup as ext_sg
from neutron.tests.unit.extensions import test_securitygroup as test_ext_sg
from vmware_nsx.nsxlib.v3 import dfw_api as firewall
from vmware_nsx.nsxlib.v3 import security
from vmware_nsx.nsxlib.v3 import exceptions as nsxlib_exc
from vmware_nsx.nsxlib.v3 import ns_group_manager
from vmware_nsx.plugins.nsx_v3 import plugin as nsx_plugin
from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_nsxv3
from vmware_nsx.tests.unit.nsxlib.v3 import nsxlib_testcase
@ -44,11 +45,13 @@ def _mock_create_and_list_nsgroups(test_method):
return nsgroup
def wrap(*args, **kwargs):
with mock.patch.object(nsx_plugin.security.firewall,
'create_nsgroup') as create_nsgroup_mock:
with mock.patch(
'vmware_nsx.nsxlib.v3.NsxLib.create_nsgroup'
) as create_nsgroup_mock:
create_nsgroup_mock.side_effect = _create_nsgroup_mock
with mock.patch.object(nsx_plugin.security.firewall,
'list_nsgroups') as list_nsgroups_mock:
with mock.patch(
"vmware_nsx.nsxlib.v3.NsxLib.list_nsgroups"
) as list_nsgroups_mock:
list_nsgroups_mock.side_effect = lambda: nsgroups
test_method(*args, **kwargs)
return wrap
@ -71,8 +74,8 @@ class TestSecurityGroupsNoDynamicCriteria(test_nsxv3.NsxV3PluginTestCaseMixin,
self._patchers.append(mock_nsx_version)
@_mock_create_and_list_nsgroups
@mock.patch.object(firewall, 'remove_nsgroup_member')
@mock.patch.object(firewall, 'add_nsgroup_members')
@mock.patch('vmware_nsx.nsxlib.v3.NsxLib.remove_nsgroup_member')
@mock.patch('vmware_nsx.nsxlib.v3.NsxLib.add_nsgroup_members')
def test_create_port_with_multiple_security_groups(self,
add_member_mock,
remove_member_mock):
@ -86,8 +89,8 @@ class TestSecurityGroupsNoDynamicCriteria(test_nsxv3.NsxV3PluginTestCaseMixin,
add_member_mock.assert_has_calls(calls, any_order=True)
@_mock_create_and_list_nsgroups
@mock.patch.object(firewall, 'remove_nsgroup_member')
@mock.patch.object(firewall, 'add_nsgroup_members')
@mock.patch('vmware_nsx.nsxlib.v3.NsxLib.remove_nsgroup_member')
@mock.patch('vmware_nsx.nsxlib.v3.NsxLib.add_nsgroup_members')
def test_update_port_with_multiple_security_groups(self,
add_member_mock,
remove_member_mock):
@ -103,8 +106,8 @@ class TestSecurityGroupsNoDynamicCriteria(test_nsxv3.NsxV3PluginTestCaseMixin,
NSG_IDS[0], firewall.LOGICAL_PORT, mock.ANY)
@_mock_create_and_list_nsgroups
@mock.patch.object(firewall, 'remove_nsgroup_member')
@mock.patch.object(firewall, 'add_nsgroup_members')
@mock.patch('vmware_nsx.nsxlib.v3.NsxLib.remove_nsgroup_member')
@mock.patch('vmware_nsx.nsxlib.v3.NsxLib.add_nsgroup_members')
def test_update_port_remove_security_group_empty_list(self,
add_member_mock,
remove_member_mock):
@ -117,12 +120,12 @@ class TestSecurityGroupsNoDynamicCriteria(test_nsxv3.NsxV3PluginTestCaseMixin,
NSG_IDS[1], firewall.LOGICAL_PORT, mock.ANY)
@_mock_create_and_list_nsgroups
@mock.patch.object(firewall, 'add_nsgroup_members')
@mock.patch('vmware_nsx.nsxlib.v3.NsxLib.add_nsgroup_members')
def test_create_port_with_full_security_group(self, add_member_mock):
def _add_member_mock(nsgroup, target_type, target_id):
if nsgroup in NSG_IDS:
raise firewall.NSGroupIsFull(nsgroup_id=nsgroup)
raise nsxlib_exc.NSGroupIsFull(nsgroup_id=nsgroup)
add_member_mock.side_effect = _add_member_mock
with self.network() as net:
@ -135,14 +138,14 @@ class TestSecurityGroupsNoDynamicCriteria(test_nsxv3.NsxV3PluginTestCaseMixin,
res_body['NeutronError']['type'])
@_mock_create_and_list_nsgroups
@mock.patch.object(firewall, 'remove_nsgroup_member')
@mock.patch.object(firewall, 'add_nsgroup_members')
@mock.patch('vmware_nsx.nsxlib.v3.NsxLib.remove_nsgroup_member')
@mock.patch('vmware_nsx.nsxlib.v3.NsxLib.add_nsgroup_members')
def test_update_port_with_full_security_group(self,
add_member_mock,
remove_member_mock):
def _add_member_mock(nsgroup, target_type, target_id):
if nsgroup == NSG_IDS[2]:
raise firewall.NSGroupIsFull(nsgroup_id=nsgroup)
raise nsxlib_exc.NSGroupIsFull(nsgroup_id=nsgroup)
add_member_mock.side_effect = _add_member_mock
with self.port() as port:
@ -174,13 +177,13 @@ class TestSecurityGroupsNoDynamicCriteria(test_nsxv3.NsxV3PluginTestCaseMixin,
class TestNSGroupManager(nsxlib_testcase.NsxLibTestCase):
"""
This test suite is responsible for unit testing the class
vmware_nsx.nsxlib.v3.security.NSGroupManager.
vmware_nsx.nsxlib.v3.ns_group_manager.NSGroupManager.
"""
@_mock_create_and_list_nsgroups
def test_first_initialization(self):
size = 5
cont_manager = security.NSGroupManager(size)
cont_manager = ns_group_manager.NSGroupManager(size)
nested_groups = cont_manager.nested_groups
self.assertEqual({i: NSG_IDS[i] for i in range(size)},
nested_groups)
@ -194,17 +197,17 @@ class TestNSGroupManager(nsxlib_testcase.NsxLibTestCase):
size = 2
# Creates 2 nested groups.
security.NSGroupManager(size)
ns_group_manager.NSGroupManager(size)
size = 5
# Creates another 3 nested groups.
nested_groups = security.NSGroupManager(size).nested_groups
nested_groups = ns_group_manager.NSGroupManager(size).nested_groups
self.assertEqual({i: NSG_IDS[i] for i in range(size)},
nested_groups)
@_mock_create_and_list_nsgroups
@mock.patch.object(firewall, 'remove_nsgroup_member')
@mock.patch.object(firewall, 'add_nsgroup_members')
@mock.patch('vmware_nsx.nsxlib.v3.NsxLib.remove_nsgroup_member')
@mock.patch('vmware_nsx.nsxlib.v3.NsxLib.add_nsgroup_members')
def test_add_and_remove_nsgroups(self,
add_member_mock,
remove_member_mock):
@ -212,7 +215,7 @@ class TestNSGroupManager(nsxlib_testcase.NsxLibTestCase):
# according to its id and the number of nested groups.
size = 5
cont_manager = security.NSGroupManager(size)
cont_manager = ns_group_manager.NSGroupManager(size)
nsgroup_id = 'nsgroup_id'
with mock.patch.object(cont_manager, '_hash_uuid', return_value=7):
@ -227,25 +230,26 @@ class TestNSGroupManager(nsxlib_testcase.NsxLibTestCase):
NSG_IDS[2], firewall.NSGROUP, nsgroup_id, verify=True)
@_mock_create_and_list_nsgroups
@mock.patch.object(firewall, 'remove_nsgroup_member')
@mock.patch.object(firewall, 'add_nsgroup_members')
@mock.patch('vmware_nsx.nsxlib.v3.NsxLib.remove_nsgroup_member')
@mock.patch('vmware_nsx.nsxlib.v3.NsxLib.add_nsgroup_members')
def test_when_nested_group_is_full(self,
add_member_mock,
remove_member_mock):
def _add_member_mock(nsgroup, target_type, target_id):
if nsgroup == NSG_IDS[2]:
raise firewall.NSGroupIsFull(nsgroup_id=nsgroup)
raise nsxlib_exc.NSGroupIsFull(nsgroup_id=nsgroup)
def _remove_member_mock(nsgroup, target_type, target_id, verify=False):
if nsgroup == NSG_IDS[2]:
raise firewall.NSGroupMemberNotFound()
raise nsxlib_exc.NSGroupMemberNotFound(nsgroup_id=nsgroup,
member_id=target_id)
add_member_mock.side_effect = _add_member_mock
remove_member_mock.side_effect = _remove_member_mock
size = 5
cont_manager = security.NSGroupManager(size)
cont_manager = ns_group_manager.NSGroupManager(size)
nsgroup_id = 'nsgroup_id'
with mock.patch.object(cont_manager, '_hash_uuid', return_value=7):
@ -270,20 +274,20 @@ class TestNSGroupManager(nsxlib_testcase.NsxLibTestCase):
remove_member_mock.assert_has_calls(calls)
@_mock_create_and_list_nsgroups
@mock.patch.object(firewall, 'remove_nsgroup_member')
@mock.patch.object(firewall, 'add_nsgroup_members')
@mock.patch('vmware_nsx.nsxlib.v3.NsxLib.remove_nsgroup_member')
@mock.patch('vmware_nsx.nsxlib.v3.NsxLib.add_nsgroup_members')
def initialize_with_absent_nested_groups(self,
add_member_mock,
remove_member_mock):
size = 3
cont_manager = security.NSGroupManager(size)
cont_manager = ns_group_manager.NSGroupManager(size)
# list_nsgroups will return nested groups 1 and 3, but not group 2.
with mock.patch.object(firewall,
'list_nsgroups_mock') as list_nsgroups_mock:
list_nsgroups_mock = lambda: list_nsgroups_mock()[::2]
# invoking the initialization process again, it should process
# groups 1 and 3 and create group 2.
cont_manager = security.NSGroupManager(size)
cont_manager = ns_group_manager.NSGroupManager(size)
self.assertEqual({1: NSG_IDS[0],
2: NSG_IDS[3],
3: NSG_IDS[2]},
@ -292,7 +296,7 @@ class TestNSGroupManager(nsxlib_testcase.NsxLibTestCase):
@_mock_create_and_list_nsgroups
def test_suggest_nested_group(self):
size = 5
cont_manager = security.NSGroupManager(size)
cont_manager = ns_group_manager.NSGroupManager(size)
# We expect that the first suggested index is 2
expected_suggested_groups = NSG_IDS[2:5] + NSG_IDS[:2]
suggest_group = lambda: cont_manager._suggest_nested_group('fake-id')

View File

@ -42,7 +42,6 @@ from neutron import version
from neutron_lib import constants
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from vmware_nsx.common import exceptions as nsx_exc
@ -50,8 +49,6 @@ from vmware_nsx.common import nsx_constants
from vmware_nsx.common import utils
from vmware_nsx.db import db as nsx_db
from vmware_nsx.extensions import advancedserviceproviders as as_providers
from vmware_nsx.nsxlib.v3 import client as nsx_client
from vmware_nsx.nsxlib.v3 import cluster as nsx_cluster
from vmware_nsx.nsxlib.v3 import resources as nsx_resources
from vmware_nsx.plugins.nsx_v3 import plugin as nsx_plugin
from vmware_nsx.tests import unit as vmware
@ -63,6 +60,84 @@ from vmware_nsx.tests.unit.nsxlib.v3 import nsxlib_testcase
PLUGIN_NAME = 'vmware_nsx.plugin.NsxV3Plugin'
def _mock_create_firewall_rules(*args):
# NOTE(arosen): the code in the neutron plugin expects the
# neutron rule id as the display_name.
rules = args[5]
return {
'rules': [
{'display_name': rule['id'], 'id': uuidutils.generate_uuid()}
for rule in rules
]}
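
_mock_create_firewall_rules() only fabricates the part of the backend reply the plugin reads back, mapping each neutron rule id onto display_name. A quick illustration with hypothetical input (the five leading None values stand in for the unused positional arguments):

    fake_rules = [{'id': 'rule-a'}, {'id': 'rule-b'}]
    result = _mock_create_firewall_rules(
        None, None, None, None, None, fake_rules)
    # every fabricated backend rule carries the neutron rule id
    assert [r['display_name'] for r in result['rules']] == ['rule-a', 'rule-b']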
def _mock_nsx_backend_calls():
mock.patch("vmware_nsx.nsxlib.v3.client.NSX3Client").start()
class FakeProfile(object):
profile_id = uuidutils.generate_uuid()
profile_type = 'FakeProfile'
def _init_nsx_profiles():
return (
FakeProfile(), # _psec_profile
FakeProfile(), # _no_psec_profile_id
FakeProfile(), # _dhcp_profile
FakeProfile(), # _mac_learning_profile
)
def _return_id_key(*args, **kwargs):
return {'id': uuidutils.generate_uuid()}
def _return_id(*args, **kwargs):
return uuidutils.generate_uuid()
mock.patch(
"vmware_nsx.plugins.nsx_v3.plugin.NsxV3Plugin._init_nsx_profiles",
side_effect=_init_nsx_profiles).start()
mock.patch(
"vmware_nsx.plugins.nsx_v3.plugin.NsxV3Plugin"
"._get_port_security_profile_id", return_value=FakeProfile()
).start()
mock.patch(
"vmware_nsx.nsxlib.v3.router.RouterLib.validate_tier0").start()
mock.patch(
"vmware_nsx.nsxlib.v3.resources.SwitchingProfile."
"create_port_mirror_profile",
side_effect=_return_id_key).start()
mock.patch(
"vmware_nsx.nsxlib.v3.NsxLib.get_bridge_cluster_id_by_name_or_id",
return_value=uuidutils.generate_uuid()).start()
mock.patch(
"vmware_nsx.nsxlib.v3.NsxLib.create_bridge_endpoint",
side_effect=_return_id_key).start()
mock.patch(
"vmware_nsx.nsxlib.v3.NsxLib.create_logical_switch",
side_effect=_return_id_key).start()
mock.patch(
"vmware_nsx.nsxlib.v3.resources.LogicalPort.create",
side_effect=_return_id_key).start()
mock.patch(
"vmware_nsx.nsxlib.v3.resources.LogicalRouter.create",
side_effect=_return_id_key).start()
mock.patch(
"vmware_nsx.nsxlib.v3.resources.LogicalDhcpServer.create",
side_effect=_return_id_key).start()
mock.patch(
"vmware_nsx.nsxlib.v3.resources.LogicalDhcpServer.create_binding",
side_effect=_return_id_key).start()
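
Every patch in _mock_nsx_backend_calls() is started with .start() and not stopped here, so a test reusing the helper outside the mixin below would want an explicit cleanup. A minimal sketch (DemoBackendMockTest is hypothetical):

    import unittest

    import mock

    class DemoBackendMockTest(unittest.TestCase):
        def setUp(self):
            super(DemoBackendMockTest, self).setUp()
            _mock_nsx_backend_calls()
            # undo every mock.patch(...).start() when the test finishes
            self.addCleanup(mock.patch.stopall)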
class NsxV3PluginTestCaseMixin(test_plugin.NeutronDbPluginV2TestCase,
nsxlib_testcase.NsxClientTestCase):
@ -72,83 +147,8 @@ class NsxV3PluginTestCaseMixin(test_plugin.NeutronDbPluginV2TestCase,
self._patchers = []
self.mock_api = nsx_v3_mocks.MockRequestSessionApi()
_mock_nsx_backend_calls()
nsxlib_testcase.NsxClientTestCase.setup_conf_overrides()
self.cluster = nsx_cluster.NSXClusteredAPI(
http_provider=nsxlib_testcase.MemoryMockAPIProvider(self.mock_api))
def _patch_object(*args, **kwargs):
patcher = mock.patch.object(*args, **kwargs)
patcher.start()
self._patchers.append(patcher)
def _new_cluster(*args, **kwargs):
return self.cluster
self.mocked_rest_fns(
nsx_plugin.security.firewall, 'nsxclient',
mock_cluster=self.cluster)
self.mocked_rest_fns(
nsx_plugin.router.nsxlib, 'client', mock_cluster=self.cluster)
mock_client_module = mock.Mock()
mock_cluster_module = mock.Mock()
mocked_client = self.new_mocked_client(
nsx_client.NSX3Client, mock_cluster=self.cluster)
mock_cluster_module.NSXClusteredAPI.return_value = self.cluster
mock_client_module.NSX3Client.return_value = mocked_client
_patch_object(nsx_plugin, 'nsx_client', new=mock_client_module)
_patch_object(nsx_plugin, 'nsx_cluster', new=mock_cluster_module)
self._mock_client_module = mock_client_module
self._mock_cluster_module = mock_cluster_module
# Mock the nsx v3 version
mock_nsxlib_get_version = mock.patch(
"vmware_nsx.nsxlib.v3.get_version",
return_value='1.1.0')
mock_nsxlib_get_version.start()
# populate pre-existing mock resources
cluster_id = uuidutils.generate_uuid()
self.mock_api.post(
'api/v1/logical-routers',
data=jsonutils.dumps({
'display_name': nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID,
'router_type': "TIER0",
'id': nsx_v3_mocks.DEFAULT_TIER0_ROUTER_UUID,
'edge_cluster_id': cluster_id}),
headers=nsx_client.JSONRESTClient._DEFAULT_HEADERS)
self.mock_api.post(
'api/v1/edge-clusters',
data=jsonutils.dumps({
'id': cluster_id,
'members': [
{'member_index': 0},
{'member_index': 1}
]}),
headers=nsx_client.JSONRESTClient._DEFAULT_HEADERS)
self.mock_api.post(
'api/v1/switching-profiles',
data=jsonutils.dumps({
'id': uuidutils.generate_uuid(),
'display_name': nsx_plugin.NSX_V3_NO_PSEC_PROFILE_NAME
}), headers=nsx_client.JSONRESTClient._DEFAULT_HEADERS)
self.mock_api.post(
'api/v1/transport-zones',
data=jsonutils.dumps({
'id': uuidutils.generate_uuid(),
'display_name': nsxlib_testcase.NSX_TZ_NAME
}), headers=nsx_client.JSONRESTClient._DEFAULT_HEADERS)
self.mock_api.post(
'api/v1/bridge-clusters',
data=jsonutils.dumps({
'id': uuidutils.generate_uuid(),
'display_name': nsx_v3_mocks.NSX_BRIDGE_CLUSTER_NAME
}), headers=nsx_client.JSONRESTClient._DEFAULT_HEADERS)
super(NsxV3PluginTestCaseMixin, self).setUp(plugin=plugin,
ext_mgr=ext_mgr)
@ -646,8 +646,8 @@ class TestL3NatTestCase(L3NatTest,
self.skipTest('not supported')
class ExtGwModeTestCase(L3NatTest,
test_ext_gw_mode.ExtGwModeIntTestCase):
class ExtGwModeTestCase(test_ext_gw_mode.ExtGwModeIntTestCase,
L3NatTest):
def test_router_gateway_set_fail_after_port_create(self):
self.skipTest("TBD")
@ -869,8 +869,10 @@ class NsxNativeDhcpTestCase(NsxV3PluginTestCaseMixin):
def _verify_dhcp_binding(self, subnet, port_data, update_data,
assert_data):
# Verify if DHCP binding is updated.
with mock.patch.object(nsx_resources.LogicalDhcpServer,
'update_binding') as update_dhcp_binding:
with mock.patch(
'vmware_nsx.nsxlib.v3.resources.LogicalDhcpServer.update_binding'
) as update_dhcp_binding:
device_owner = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'None'
device_id = uuidutils.generate_uuid()
with self.port(subnet=subnet, device_owner=device_owner,

View File

@ -18,8 +18,10 @@ import mock
import unittest
from oslo_config import cfg
from oslo_utils import uuidutils
from requests import exceptions as requests_exceptions
from vmware_nsx.nsxlib import v3 as nsxlib
from vmware_nsx.nsxlib.v3 import client as nsx_client
from vmware_nsx.nsxlib.v3 import cluster as nsx_cluster
@ -39,6 +41,54 @@ BRIDGE_FNS = ['create_resource', 'delete_resource',
'update_resource', 'get_resource']
def _mock_nsxlib():
def _return_id_key(*args, **kwargs):
return {'id': uuidutils.generate_uuid()}
# FIXME(arosen): this is duplicated in test_plugin
def _mock_create_firewall_rules(*args):
# NOTE(arosen): the code in the neutron plugin expects the
# neutron rule id as the display_name.
rules = args[5]
return {
'rules': [
{'display_name': rule['id'], 'id': uuidutils.generate_uuid()}
for rule in rules
]}
mock.patch(
"vmware_nsx.nsxlib.v3.cluster.NSXRequestsHTTPProvider"
".validate_connection").start()
mock.patch(
"vmware_nsx.nsxlib.v3.NsxLib.create_nsgroup",
side_effect=_return_id_key
).start()
mock.patch(
"vmware_nsx.nsxlib.v3.NsxLib.create_empty_section",
side_effect=_return_id_key).start()
mock.patch(
"vmware_nsx.nsxlib.v3.NsxLib._init_default_section",
side_effect=_return_id_key).start()
mock.patch(
"vmware_nsx.nsxlib.v3.NsxLib.list_nsgroups").start()
mock.patch(
"vmware_nsx.nsxlib.v3.NsxLib.create_firewall_rules",
side_effect=_mock_create_firewall_rules).start()
mock.patch(
"vmware_nsx.nsxlib.v3.NsxLib.get_transport_zone_id_by_name_or_id",
side_effect=_return_id_key).start()
mock.patch(
"vmware_nsx.nsxlib.v3.NsxLib.get_version",
return_value='1.1.0').start()
class NsxLibTestCase(unittest.TestCase):
@classmethod
@ -57,12 +107,27 @@ class NsxLibTestCase(unittest.TestCase):
cfg.CONF.set_override('http_timeout', NSX_HTTP_TIMEOUT, 'nsx_v3')
cfg.CONF.set_override('http_read_timeout',
NSX_HTTP_READ_TIMEOUT, 'nsx_v3')
cfg.CONF.set_override('network_scheduler_driver',
cfg.CONF.set_override(
'network_scheduler_driver',
'neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler')
def setUp(self, *args, **kwargs):
super(NsxLibTestCase, self).setUp()
NsxClientTestCase.setup_conf_overrides()
_mock_nsxlib()
self.nsxlib = nsxlib.NsxLib(
username=cfg.CONF.nsx_v3.nsx_api_user,
password=cfg.CONF.nsx_v3.nsx_api_password,
retries=cfg.CONF.nsx_v3.http_retries,
insecure=cfg.CONF.nsx_v3.insecure,
ca_file=cfg.CONF.nsx_v3.ca_file,
concurrent_connections=cfg.CONF.nsx_v3.concurrent_connections,
http_timeout=cfg.CONF.nsx_v3.http_timeout,
http_read_timeout=cfg.CONF.nsx_v3.http_read_timeout,
conn_idle_timeout=cfg.CONF.nsx_v3.conn_idle_timeout,
http_provider=None,
max_attempts=cfg.CONF.nsx_v3.retries)
# print diffs when assert comparisons fail
self.maxDiff = None
@ -254,9 +319,10 @@ class NsxClientTestCase(NsxLibTestCase):
return client
def mocked_rest_fns(self, module, attr, mock_validate=True,
mock_cluster=None):
client = nsx_client.NSX3Client(
mock_cluster or self.mock_nsx_clustered_api())
mock_cluster=None, client=None):
if client is None:
client = nsx_client.NSX3Client(
mock_cluster or self.mock_nsx_clustered_api())
mocked_fns = NsxClientTestCase.MockBridge(client)
mocked_fns.JSONRESTClient = nsx_client.JSONRESTClient

View File

@ -18,8 +18,8 @@ import copy
from oslo_log import log
from oslo_serialization import jsonutils
from vmware_nsx.common import exceptions as exep
from vmware_nsx.nsxlib.v3 import client
from vmware_nsx.nsxlib.v3 import exceptions as nsxlib_exc
from vmware_nsx.tests.unit.nsx_v3 import mocks
from vmware_nsx.tests.unit.nsxlib.v3 import nsxlib_testcase
@ -160,7 +160,7 @@ class NsxV3RESTClientTestCase(nsxlib_testcase.NsxClientTestCase):
def test_client_create(self):
api = self.new_mocked_client(client.RESTClient,
url_prefix='api/v1/ports')
api.create(jsonutils.dumps({'resource-name': 'port1'}))
api.create(body=jsonutils.dumps({'resource-name': 'port1'}))
assert_call(
'post', api,
@ -235,7 +235,7 @@ class NsxV3RESTClientTestCase(nsxlib_testcase.NsxClientTestCase):
for code in client.RESTClient._VERB_RESP_CODES.get(verb):
_verb_response_code(verb, code)
self.assertRaises(
exep.ManagerError,
nsxlib_exc.ManagerError,
_verb_response_code, verb, 500)

View File

@ -15,13 +15,14 @@
#
import mock
import six.moves.urllib.parse as urlparse
import unittest
from oslo_config import cfg
from oslo_serialization import jsonutils
from requests import exceptions as requests_exceptions
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.nsxlib.v3 import client
from vmware_nsx.nsxlib.v3 import cluster
from vmware_nsx.nsxlib.v3 import exceptions as nsxlib_exc
from vmware_nsx.tests.unit.nsx_v3 import mocks
from vmware_nsx.tests.unit.nsxlib.v3 import nsxlib_testcase
@ -34,7 +35,7 @@ def _validate_conn_down(*args, **kwargs):
raise requests_exceptions.ConnectionError()
class RequestsHTTPProviderTestCase(nsxlib_testcase.NsxClientTestCase):
class RequestsHTTPProviderTestCase(unittest.TestCase):
def test_new_connection(self):
mock_api = mock.Mock()
@ -56,11 +57,12 @@ class RequestsHTTPProviderTestCase(nsxlib_testcase.NsxClientTestCase):
self.assertEqual(session.timeout, 99)
def test_validate_connection(self):
self.skipTest("Revist")
mock_conn = mocks.MockRequestSessionApi()
mock_ep = mock.Mock()
mock_ep.provider.url = 'https://1.2.3.4'
provider = cluster.NSXRequestsHTTPProvider()
self.assertRaises(nsx_exc.ResourceNotFound,
self.assertRaises(nsxlib_exc.ResourceNotFound,
provider.validate_connection,
mock.Mock(), mock_ep, mock_conn)
@ -190,17 +192,17 @@ class ClusteredAPITestCase(nsxlib_testcase.NsxClientTestCase):
api = cluster.NSXClusteredAPI(http_provider=mock_provider)
self.assertEqual(len(api.endpoints), 3)
self.assertRaises(nsx_exc.ServiceClusterUnavailable,
self.assertRaises(nsxlib_exc.ServiceClusterUnavailable,
api.get, 'api/v1/transport-zones')
def test_cluster_proxy_stale_revision(self):
def stale_revision():
raise nsx_exc.StaleRevision(manager='1.1.1.1',
operation='whatever')
raise nsxlib_exc.StaleRevision(manager='1.1.1.1',
operation='whatever')
api = self.mock_nsx_clustered_api(session_response=stale_revision)
self.assertRaises(nsx_exc.StaleRevision,
self.assertRaises(nsxlib_exc.StaleRevision,
api.get, 'api/v1/transport-zones')
def test_cluster_proxy_connection_error(self):
@ -210,7 +212,7 @@ class ClusteredAPITestCase(nsxlib_testcase.NsxClientTestCase):
api = self.mock_nsx_clustered_api(session_response=connect_timeout)
api._validate = mock.Mock()
self.assertRaises(nsx_exc.ServiceClusterUnavailable,
self.assertRaises(nsxlib_exc.ServiceClusterUnavailable,
api.get, 'api/v1/transport-zones')
def test_cluster_round_robin_servicing(self):

View File

@ -16,12 +16,9 @@
import mock
from oslo_log import log
from oslo_serialization import jsonutils
from vmware_nsx.nsxlib import v3 as nsxlib
from vmware_nsx.tests.unit.nsx_v3 import test_constants as test_constants_v3
from vmware_nsx.tests.unit.nsxlib.v3 import nsxlib_testcase
from vmware_nsx.tests.unit.nsxlib.v3 import test_client
LOG = log.getLogger(__name__)
@ -35,7 +32,7 @@ class NsxLibQosTestCase(nsxlib_testcase.NsxClientTestCase):
"tags": []
}
if qos_marking:
body = nsxlib._update_dscp_in_args(body, qos_marking, dscp)
body = self.nsxlib._update_dscp_in_args(body, qos_marking, dscp)
body["display_name"] = test_constants_v3.FAKE_NAME
body["description"] = description
@ -66,7 +63,8 @@ class NsxLibQosTestCase(nsxlib_testcase.NsxClientTestCase):
break
if qos_marking:
body = nsxlib._update_dscp_in_args(body, qos_marking, dscp)
body = self.nsxlib._update_dscp_in_args(
body, qos_marking, dscp)
return body
@ -74,47 +72,39 @@ class NsxLibQosTestCase(nsxlib_testcase.NsxClientTestCase):
"""
Test creating a qos-switching profile returns the correct response
"""
api = self.mocked_rest_fns(nsxlib, 'client')
nsxlib.create_qos_switching_profile(
tags=[],
name=test_constants_v3.FAKE_NAME,
description=test_constants_v3.FAKE_NAME)
test_client.assert_json_call(
'post', api,
'https://1.2.3.4/api/v1/switching-profiles',
data=jsonutils.dumps(self._body(),
sort_keys=True))
with mock.patch.object(self.nsxlib.client, 'create') as create:
self.nsxlib.create_qos_switching_profile(
tags=[],
name=test_constants_v3.FAKE_NAME,
description=test_constants_v3.FAKE_NAME)
create.assert_called_with(
'switching-profiles', self._body())
def test_update_qos_switching_profile(self):
"""
Test updating a qos-switching profile returns the correct response
"""
api = self.mocked_rest_fns(nsxlib, 'client')
original_profile = self._body()
new_description = "Test"
with mock.patch.object(nsxlib.client, 'get_resource',
return_value=original_profile):
# update the description of the profile
nsxlib.update_qos_switching_profile(
test_constants_v3.FAKE_QOS_PROFILE['id'],
tags=[],
description=new_description)
with mock.patch.object(self.nsxlib.client, 'get',
return_value=original_profile):
with mock.patch.object(self.nsxlib.client, 'update') as update:
test_client.assert_json_call(
'put', api,
'https://1.2.3.4/api/v1/switching-profiles/%s'
% test_constants_v3.FAKE_QOS_PROFILE['id'],
data=jsonutils.dumps(self._body(description=new_description),
sort_keys=True))
# update the description of the profile
self.nsxlib.update_qos_switching_profile(
test_constants_v3.FAKE_QOS_PROFILE['id'],
tags=[],
description=new_description)
update.assert_called_with(
'switching-profiles/%s'
% test_constants_v3.FAKE_QOS_PROFILE['id'],
self._body(description=new_description))
def test_enable_qos_switching_profile_shaping(self):
"""
Test updating a qos-switching profile returns the correct response
"""
api = self.mocked_rest_fns(nsxlib, 'client')
original_profile = self._body_with_shaping()
burst_size = 100
@ -122,37 +112,34 @@ class NsxLibQosTestCase(nsxlib_testcase.NsxClientTestCase):
average_bandwidth = 300
qos_marking = "untrusted"
dscp = 10
with mock.patch.object(nsxlib.client, 'get_resource',
return_value=original_profile):
# update the bw shaping of the profile
nsxlib.update_qos_switching_profile_shaping(
test_constants_v3.FAKE_QOS_PROFILE['id'],
shaping_enabled=True,
burst_size=burst_size,
peak_bandwidth=peak_bandwidth,
average_bandwidth=average_bandwidth,
qos_marking=qos_marking,
dscp=dscp)
test_client.assert_json_call(
'put', api,
'https://1.2.3.4/api/v1/switching-profiles/%s'
% test_constants_v3.FAKE_QOS_PROFILE['id'],
data=jsonutils.dumps(
with mock.patch.object(self.nsxlib.client, 'get',
return_value=original_profile):
with mock.patch.object(self.nsxlib.client, 'update') as update:
# update the bw shaping of the profile
self.nsxlib.update_qos_switching_profile_shaping(
test_constants_v3.FAKE_QOS_PROFILE['id'],
shaping_enabled=True,
burst_size=burst_size,
peak_bandwidth=peak_bandwidth,
average_bandwidth=average_bandwidth,
qos_marking=qos_marking,
dscp=dscp)
update.assert_called_with(
'switching-profiles/%s'
% test_constants_v3.FAKE_QOS_PROFILE['id'],
self._body_with_shaping(
shaping_enabled=True,
burst_size=burst_size,
peak_bandwidth=peak_bandwidth,
average_bandwidth=average_bandwidth,
qos_marking="untrusted", dscp=10),
sort_keys=True))
qos_marking="untrusted", dscp=10))
def test_disable_qos_switching_profile_shaping(self):
"""
Test updating a qos-switching profile returns the correct response
"""
api = self.mocked_rest_fns(nsxlib, 'client')
burst_size = 100
peak_bandwidth = 200
average_bandwidth = 300
@ -163,31 +150,27 @@ class NsxLibQosTestCase(nsxlib_testcase.NsxClientTestCase):
average_bandwidth=average_bandwidth,
qos_marking="untrusted",
dscp=10)
with mock.patch.object(nsxlib.client, 'get_resource',
return_value=original_profile):
# update the bw shaping of the profile
nsxlib.update_qos_switching_profile_shaping(
test_constants_v3.FAKE_QOS_PROFILE['id'],
shaping_enabled=False, qos_marking="trusted")
test_client.assert_json_call(
'put', api,
'https://1.2.3.4/api/v1/switching-profiles/%s'
% test_constants_v3.FAKE_QOS_PROFILE['id'],
data=jsonutils.dumps(
self._body_with_shaping(qos_marking="trusted"),
sort_keys=True))
with mock.patch.object(self.nsxlib.client, 'get',
return_value=original_profile):
with mock.patch.object(self.nsxlib.client, 'update') as update:
# update the bw shaping of the profile
self.nsxlib.update_qos_switching_profile_shaping(
test_constants_v3.FAKE_QOS_PROFILE['id'],
shaping_enabled=False, qos_marking="trusted")
update.assert_called_with(
'switching-profiles/%s'
% test_constants_v3.FAKE_QOS_PROFILE['id'],
self._body_with_shaping(qos_marking="trusted"))
def test_delete_qos_switching_profile(self):
"""
Test deleting qos-switching-profile
"""
api = self.mocked_rest_fns(nsxlib, 'client')
nsxlib.delete_qos_switching_profile(
test_constants_v3.FAKE_QOS_PROFILE['id'])
test_client.assert_json_call(
'delete', api,
'https://1.2.3.4/api/v1/switching-profiles/%s'
% test_constants_v3.FAKE_QOS_PROFILE['id'])
with mock.patch.object(self.nsxlib.client, 'delete') as delete:
self.nsxlib.delete_qos_switching_profile(
test_constants_v3.FAKE_QOS_PROFILE['id'])
delete.assert_called_with(
'switching-profiles/%s'
% test_constants_v3.FAKE_QOS_PROFILE['id'])

View File

@ -13,14 +13,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
import mock
from oslo_log import log
from oslo_serialization import jsonutils
from vmware_nsx.common import nsx_constants
from vmware_nsx.nsxlib import v3 as nsxlib
from vmware_nsx.tests.unit.nsx_v3 import mocks as nsx_v3_mocks
from vmware_nsx.tests.unit.nsxlib.v3 import nsxlib_testcase
from vmware_nsx.tests.unit.nsxlib.v3 import test_client
LOG = log.getLogger(__name__)
@ -46,59 +45,49 @@ class NsxLibSwitchTestCase(nsxlib_testcase.NsxClientTestCase):
"""
Test creating a switch returns the correct response and 200 status
"""
api = self.mocked_rest_fns(nsxlib, 'client')
nsxlib.create_logical_switch(
nsx_v3_mocks.FAKE_NAME, NsxLibSwitchTestCase._tz_id, [])
test_client.assert_json_call(
'post', api,
'https://1.2.3.4/api/v1/logical-switches',
data=jsonutils.dumps(self._create_body(), sort_keys=True))
with mock.patch.object(self.nsxlib.client, 'create') as create:
self.nsxlib.create_logical_switch(
nsx_v3_mocks.FAKE_NAME, NsxLibSwitchTestCase._tz_id, [])
create.assert_called_with('logical-switches', self._create_body())
def test_create_logical_switch_admin_down(self):
"""
Test creating switch with admin_state down
"""
api = self.mocked_rest_fns(nsxlib, 'client')
nsxlib.create_logical_switch(
nsx_v3_mocks.FAKE_NAME, NsxLibSwitchTestCase._tz_id,
[], admin_state=False)
with mock.patch.object(self.nsxlib.client, 'create') as create:
self.nsxlib.create_logical_switch(
nsx_v3_mocks.FAKE_NAME, NsxLibSwitchTestCase._tz_id,
[], admin_state=False)
test_client.assert_json_call(
'post', api,
'https://1.2.3.4/api/v1/logical-switches',
data=jsonutils.dumps(self._create_body(
admin_state=nsx_constants.ADMIN_STATE_DOWN),
sort_keys=True))
create.assert_called_with(
'logical-switches',
self._create_body(
admin_state=nsx_constants.ADMIN_STATE_DOWN))
def test_create_logical_switch_vlan(self):
"""
Test creating switch with provider:network_type VLAN
"""
api = self.mocked_rest_fns(nsxlib, 'client')
nsxlib.create_logical_switch(
nsx_v3_mocks.FAKE_NAME, NsxLibSwitchTestCase._tz_id,
[], vlan_id='123')
with mock.patch.object(self.nsxlib.client, 'create') as create:
self.nsxlib.create_logical_switch(
nsx_v3_mocks.FAKE_NAME, NsxLibSwitchTestCase._tz_id,
[], vlan_id='123')
test_client.assert_json_call(
'post', api,
'https://1.2.3.4/api/v1/logical-switches',
data=jsonutils.dumps(self._create_body(vlan_id='123'),
sort_keys=True))
create.assert_called_with(
'logical-switches',
self._create_body(vlan_id='123'))
def test_delete_logical_switch(self):
"""
Test deleting switch
"""
api = self.mocked_rest_fns(nsxlib, 'client')
fake_switch = nsx_v3_mocks.make_fake_switch()
nsxlib.delete_logical_switch(fake_switch['id'])
test_client.assert_json_call(
'delete', api,
'https://1.2.3.4/api/v1/logical-switches/%s'
'?detach=true&cascade=true' % fake_switch['id'])
with mock.patch.object(self.nsxlib.client, 'delete') as delete:
fake_switch = nsx_v3_mocks.make_fake_switch()
self.nsxlib.delete_logical_switch(fake_switch['id'])
delete.assert_called_with(
'logical-switches/%s'
'?detach=true&cascade=true' % fake_switch['id'])

View File

@ -29,7 +29,6 @@ from neutron.tests import base
from neutron_lib import exceptions as n_exc
from vmware_nsx.common import nsx_constants
from vmware_nsx.nsxlib import v3 as nsxlib
from vmware_nsx.services.l2gateway.nsx_v3 import driver as nsx_v3_driver
from vmware_nsx.tests.unit.nsx_v3 import mocks as nsx_v3_mocks
from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_nsx_v3_plugin
@ -85,8 +84,9 @@ class TestNsxV3L2GatewayDriver(test_l2gw_db.L2GWTestCase,
"nsx_v3")
nsx_v3_driver.NsxV3Driver(mock.MagicMock())
l2gws = self.driver._get_l2_gateways(self.context)
def_bridge_cluster_id = nsxlib.get_bridge_cluster_id_by_name_or_id(
def_bridge_cluster_name)
def_bridge_cluster_id = (
self.nsxlib.get_bridge_cluster_id_by_name_or_id(
def_bridge_cluster_name))
def_l2gw = None
for l2gw in l2gws:
for device in l2gw['devices']:

View File

@ -26,15 +26,14 @@ from neutron.tests.unit.services.qos import base
from vmware_nsx.common import utils
from vmware_nsx.db import db as nsx_db
from vmware_nsx.nsxlib import v3 as nsxlib
from vmware_nsx.services.qos.nsx_v3 import utils as qos_utils
from vmware_nsx.tests.unit.nsxlib.v3 import nsxlib_testcase
from vmware_nsx.tests.unit.nsx_v3 import test_plugin
PLUGIN_NAME = 'vmware_nsx.plugins.nsx_v3.plugin.NsxV3Plugin'
class TestQosNsxV3Notification(nsxlib_testcase.NsxClientTestCase,
base.BaseQosTestCase):
class TestQosNsxV3Notification(base.BaseQosTestCase,
test_plugin.NsxV3PluginTestCaseMixin):
def setUp(self):
super(TestQosNsxV3Notification, self).setUp()
@ -47,7 +46,6 @@ class TestQosNsxV3Notification(nsxlib_testcase.NsxClientTestCase,
['vmware_nsx.tests.unit.services.qos.fake_notifier.'
'DummyNotificationDriver'],
"qos")
self.qos_plugin = qos_plugin.QoSPlugin()
self.ctxt = context.Context('fake_user', 'fake_tenant')
self.policy_data = {
@ -81,7 +79,7 @@ class TestQosNsxV3Notification(nsxlib_testcase.NsxClientTestCase,
mock.patch(
'neutron.objects.qos.policy.QosPolicy.obj_load_attr').start()
mock.patch.object(nsx_db, 'get_switch_profile_by_qos_policy',
return_value=self.fake_profile_id).start()
return_value=self.fake_profile_id).start()
self.peak_bw_multiplier = cfg.CONF.NSX.qos_peak_bw_multiplier
@ -91,8 +89,10 @@ class TestQosNsxV3Notification(nsxlib_testcase.NsxClientTestCase,
@mock.patch.object(nsx_db, 'add_qos_policy_profile_mapping')
def test_policy_create_profile(self, fake_db_add, fake_rbac_create):
# test the switch profile creation when a QoS policy is created
with mock.patch.object(nsxlib, 'create_qos_switching_profile',
return_value=self.fake_profile) as create_profile:
with mock.patch(
'vmware_nsx.nsxlib.v3.NsxLib.create_qos_switching_profile',
return_value=self.fake_profile
) as create_profile:
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.policy.QosPolicy.create'):
@ -117,8 +117,9 @@ class TestQosNsxV3Notification(nsxlib_testcase.NsxClientTestCase,
# test the switch profile update when a QoS policy is updated
fields = base_object.get_updatable_fields(
policy_object.QosPolicy, self.policy_data['policy'])
with mock.patch.object(nsxlib,
'update_qos_switching_profile') as update_profile:
with mock.patch(
'vmware_nsx.nsxlib.v3.NsxLib.update_qos_switching_profile'
) as update_profile:
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.policy.QosPolicy.update'):
@ -147,8 +148,10 @@ class TestQosNsxV3Notification(nsxlib_testcase.NsxClientTestCase,
setattr(_policy, "rules", [self.rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
with mock.patch.object(nsxlib,
'update_qos_switching_profile_shaping') as update_profile:
with mock.patch(
'vmware_nsx.nsxlib.v3.NsxLib.'
'update_qos_switching_profile_shaping'
) as update_profile:
with mock.patch('neutron.objects.db.api.update_object',
return_value=self.rule_data):
self.qos_plugin.update_policy_bandwidth_limit_rule(
@ -188,8 +191,10 @@ class TestQosNsxV3Notification(nsxlib_testcase.NsxClientTestCase,
setattr(_policy, "rules", [rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
with mock.patch.object(nsxlib,
'update_qos_switching_profile_shaping') as update_profile:
with mock.patch(
'vmware_nsx.nsxlib.v3.NsxLib.'
'update_qos_switching_profile_shaping'
) as update_profile:
with mock.patch('neutron.objects.db.api.update_object',
return_value=rule_data):
self.qos_plugin.update_policy_bandwidth_limit_rule(
@ -219,8 +224,10 @@ class TestQosNsxV3Notification(nsxlib_testcase.NsxClientTestCase,
setattr(_policy, "rules", [self.dscp_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
with mock.patch.object(nsxlib,
'update_qos_switching_profile_shaping') as update_profile:
with mock.patch(
'vmware_nsx.nsxlib.v3.NsxLib.'
'update_qos_switching_profile_shaping'
) as update_profile:
with mock.patch('neutron.objects.db.api.'
'update_object', return_value=self.dscp_rule_data):
self.qos_plugin.update_policy_dscp_marking_rule(
@ -250,8 +257,10 @@ class TestQosNsxV3Notification(nsxlib_testcase.NsxClientTestCase,
# as if it was deleted
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
with mock.patch.object(nsxlib,
'update_qos_switching_profile_shaping') as update_profile:
with mock.patch(
"vmware_nsx.nsxlib.v3.NsxLib."
"update_qos_switching_profile_shaping"
) as update_profile:
setattr(_policy, "rules", [self.rule])
self.qos_plugin.delete_policy_bandwidth_limit_rule(
self.ctxt, self.rule.id, self.policy.id)
@ -269,7 +278,9 @@ class TestQosNsxV3Notification(nsxlib_testcase.NsxClientTestCase,
@mock.patch('neutron.objects.db.api.get_object', return_value=None)
def test_policy_delete_profile(self, *mocks):
# test the switch profile deletion when a QoS policy is deleted
with mock.patch.object(nsxlib, 'delete_qos_switching_profile',
return_value=self.fake_profile) as delete_profile:
with mock.patch(
'vmware_nsx.nsxlib.v3.NsxLib.delete_qos_switching_profile',
return_value=self.fake_profile
) as delete_profile:
self.qos_plugin.delete_policy(self.ctxt, self.policy.id)
delete_profile.assert_called_once_with(self.fake_profile_id)

View File

@ -32,9 +32,7 @@ from neutron_lbaas.services.loadbalancer.drivers.vmware import db # noqa
from vmware_nsx._i18n import _
from vmware_nsx.common import config # noqa
from vmware_nsx.nsxlib.v3 import client as nsx_v3_client
from vmware_nsx.nsxlib.v3 import resources as nsx_v3_resources
from vmware_nsx.plugins.nsx_v3 import plugin as nsx_v3_plugin
from vmware_nsx.shell import resources
from vmware_nsx.tests import unit as vmware
from vmware_nsx.tests.unit.nsx_v.vshield import fake_vcns
@ -132,10 +130,7 @@ class TestNsxv3AdminUtils(AbstractTestAdminUtils,
self._patchers.append(patcher)
def _init_mock_plugin(self):
self._patch_object(nsx_v3_client, 'NSX3Client',
new=self._mock_client_module)
self._patch_object(nsx_v3_plugin, 'nsx_cluster',
new=self._mock_cluster_module)
test_v3_plugin._mock_nsx_backend_calls()
# mock resources
self._patch_object(nsx_v3_resources.LogicalPort,