From 9cc7eae5da43e54a2577b28ccc07e48a8103f04b Mon Sep 17 00:00:00 2001 From: asarfaty Date: Mon, 19 Oct 2020 10:32:57 +0200 Subject: [PATCH] V2T migration: add pre-flight checks Also remove extra address pairs in api_replay, since it's only a warning at the moment. Change-Id: I31259738f69ce89c08adc0b24c4807a269460be8 --- doc/source/admin_util.rst | 4 +- vmware_nsx/api_replay/utils.py | 17 +- vmware_nsx/common/utils.py | 5 + vmware_nsx/plugins/nsx_v/vshield/vcns.py | 6 + .../admin/plugins/nsxv/resources/migration.py | 158 ++++++++++++++---- 5 files changed, 145 insertions(+), 45 deletions(-) diff --git a/doc/source/admin_util.rst b/doc/source/admin_util.rst index 3a3d0ea5ef..f3ac1aa498 100644 --- a/doc/source/admin_util.rst +++ b/doc/source/admin_util.rst @@ -323,9 +323,9 @@ Metadata V2T migration ~~~~~~~~~~~~~ -- Validate the configuration of the NSX-V plugin before migrating to NSX-T:: +- Validate the configuration of the NSX-V plugin before migrating to NSX-T. When the strict flag is true, 
the validation will fail on warnings as well:: - nsxadmin -r nsx-migrate-v2t -o validate [--property transit-network=] + nsxadmin -r nsx-migrate-v2t -o validate [--property transit-network=] [--property strict=true] Config ~~~~~~ diff --git a/vmware_nsx/api_replay/utils.py b/vmware_nsx/api_replay/utils.py index 1ffd080024..ee3b7f5d26 100644 --- a/vmware_nsx/api_replay/utils.py +++ b/vmware_nsx/api_replay/utils.py @@ -19,6 +19,8 @@ from oslo_config import cfg from oslo_utils import uuidutils import webob.exc +from vmware_nsxlib.v3 import nsx_constants as nsxlib_consts + logging.basicConfig(level=logging.INFO) LOG = logging.getLogger(__name__) @@ -245,19 +247,18 @@ class PrepareObjectForMigration(object): if remove_qos: body = self.drop_fields(body, ['qos_policy_id']) + num_allowed_pairs = nsxlib_consts.NUM_ALLOWED_IP_ADDRESSES_v4 - 1 if 'allowed_address_pairs' in body: if not body['allowed_address_pairs']: # remove allowed_address_pairs if empty: del body['allowed_address_pairs'] else: - # remove unsupported allowed_address_pairs - for pair in body['allowed_address_pairs']: - ip = pair.get('ip_address') - if len(ip.split('/')) > 1: - LOG.warning("ignoring allowed_address_pair %s for " - "port %s as cidr is not supported", - pair, port['id']) - body['allowed_address_pairs'].remove(pair) + if len(body['allowed_address_pairs']) > num_allowed_pairs: + body['allowed_address_pairs'] = body[ + 'allowed_address_pairs'][:num_allowed_pairs] + LOG.warning("ignoring extra allowed_address_pair for " + "port %s as only %s are allowed", + port['id'], num_allowed_pairs) # remove port security if mac learning is enabled if (body.get('mac_learning_enabled') and diff --git a/vmware_nsx/common/utils.py b/vmware_nsx/common/utils.py index ebce78f7cf..5fa07de1d6 100644 --- a/vmware_nsx/common/utils.py +++ b/vmware_nsx/common/utils.py @@ -123,6 +123,11 @@ def is_nsxv_version_6_4_6(nsx_version): version.LooseVersion('6.4.6')) +def is_nsxv_version_6_4_9(nsx_version): + return 
(version.LooseVersion(nsx_version) >= + version.LooseVersion('6.4.9')) + + def is_nsxv_dhcp_binding_supported(nsx_version): return ((version.LooseVersion(nsx_version) >= version.LooseVersion('6.3.3')) or diff --git a/vmware_nsx/plugins/nsx_v/vshield/vcns.py b/vmware_nsx/plugins/nsx_v/vshield/vcns.py index b7c9e8f473..f6b765e1bd 100644 --- a/vmware_nsx/plugins/nsx_v/vshield/vcns.py +++ b/vmware_nsx/plugins/nsx_v/vshield/vcns.py @@ -387,6 +387,12 @@ class Vcns(object): VIP_RESOURCE, vip_vseid) return self.do_request(HTTP_GET, uri, decode=True) + def get_vips(self, edge_id): + uri = self._build_uri_path( + edge_id, LOADBALANCER_SERVICE, + VIP_RESOURCE) + return self.do_request(HTTP_GET, uri, decode=True) + def update_vip(self, edge_id, vip_vseid, vip_new): uri = self._build_uri_path( edge_id, LOADBALANCER_SERVICE, diff --git a/vmware_nsx/shell/admin/plugins/nsxv/resources/migration.py b/vmware_nsx/shell/admin/plugins/nsxv/resources/migration.py index a4269d39ae..fc68bcefce 100644 --- a/vmware_nsx/shell/admin/plugins/nsxv/resources/migration.py +++ b/vmware_nsx/shell/admin/plugins/nsxv/resources/migration.py @@ -17,6 +17,7 @@ import sys import netaddr from oslo_log import log as logging +from networking_l2gw.db.l2gateway import l2gateway_models from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef from neutron_lib.api.definitions import provider_net as pnet from neutron_lib.api import validators @@ -26,6 +27,9 @@ from neutron_lib import context as n_context from vmware_nsx.common import nsxv_constants from vmware_nsx.common import utils as c_utils +from vmware_nsx.db import nsxv_db +from vmware_nsx.plugins.nsx_v import availability_zones as nsx_az +from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils from vmware_nsx.services.lbaas.octavia import constants as oct_const from vmware_nsx.shell.admin.plugins.common import constants @@ -37,45 +41,79 @@ from 
vmware_nsxlib.v3 import nsx_constants as nsxlib_consts LOG = logging.getLogger(__name__) +def _get_router_from_network(context, plugin, subnet_id): + subnet = plugin.get_subnet(context.elevated(), subnet_id) + network_id = subnet['network_id'] + ports = plugin._get_network_interface_ports( + context.elevated(), network_id) + if ports: + return ports[0]['device_id'] + + @admin_utils.output_header def validate_config_for_migration(resource, event, trigger, **kwargs): """Validate the nsxv configuration before migration to nsx-t""" + # Read the command line parameters transit_networks = ["100.64.0.0/16"] + strict = False if kwargs.get('property'): # input validation properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) transit_network = properties.get('transit-network') if transit_network: transit_networks = [transit_network] + strict = bool(properties.get('strict', 'false').lower() == 'true') - # Max number of allowed address pairs (allowing 3 for fixed ips) - num_allowed_addr_pairs = nsxlib_consts.NUM_ALLOWED_IP_ADDRESSES - 3 + LOG.info("Running migration config validation in %sstrict mode", + '' if strict else 'non-') admin_context = n_context.get_admin_context() n_errors = 0 + # General config options / per AZ which are unsupported + zones = nsx_az.NsxVAvailabilityZones() + unsupported_configs = ['edge_ha', 'edge_host_groups'] + for az in zones.list_availability_zones_objects(): + for attr in unsupported_configs: + if getattr(az, attr): + LOG.warning("WARNING: \'%s\' configuration is not supported " + "and will not be honored by NSX-T (availability " + "zone %s)", attr, az.name) + if strict: + n_errors = n_errors + 1 + with utils.NsxVPluginWrapper() as plugin: + # The migration is supported only for NSX 6.4.9 and above + nsx_ver = plugin.nsx_v.vcns.get_version() + if not c_utils.is_nsxv_version_6_4_9(nsx_ver): + LOG.error("ERROR: Migration with NSX-V version %s is not " + "supported.", nsx_ver) + n_errors = n_errors + 1 + # Ports validations: + # Max 
number of allowed address pairs (allowing 1 for fixed ips) + num_allowed_addr_pairs = nsxlib_consts.NUM_ALLOWED_IP_ADDRESSES_v4 - 1 ports = plugin.get_ports(admin_context) for port in ports: net_id = port['network_id'] # Too many address pairs in a port address_pairs = port.get(addr_apidef.ADDRESS_PAIRS) if len(address_pairs) > num_allowed_addr_pairs: - n_errors = n_errors + 1 - LOG.error("%s allowed address pairs for port %s. Only %s are " - "allowed.", - len(address_pairs), port['id'], - num_allowed_addr_pairs) + LOG.warning("WARNING: %s allowed address pairs for port %s. " + "Only %s are allowed.", + len(address_pairs), port['id'], + num_allowed_addr_pairs) + if strict: + n_errors = n_errors + 1 # Compute port on external network if (port.get('device_owner', '').startswith( nl_constants.DEVICE_OWNER_COMPUTE_PREFIX) and plugin._network_is_external(admin_context, net_id)): n_errors = n_errors + 1 - LOG.error("Compute port %s on external network %s is not " - "allowed.", port['id'], net_id) + LOG.error("ERROR: Compute port %s on external network %s is " + "not allowed.", port['id'], net_id) # Networks & subnets validations: networks = plugin.get_networks(admin_context) @@ -89,7 +127,7 @@ def validate_config_for_migration(resource, event, trigger, **kwargs): if (net_type == c_utils.NsxVNetworkTypes.VXLAN or net_type == c_utils.NsxVNetworkTypes.PORTGROUP): n_errors = n_errors + 1 - LOG.error("Network %s of type %s is not supported.", + LOG.error("ERROR: Network %s of type %s is not supported.", net['id'], net_type) subnets = plugin._get_subnets_by_network(admin_context, net['id']) @@ -101,8 +139,8 @@ def validate_config_for_migration(resource, event, trigger, **kwargs): n_dhcp_subnets = n_dhcp_subnets + 1 if n_dhcp_subnets > 1: n_errors = n_errors + 1 - LOG.error("Network %s has %s dhcp subnets. Only 1 is allowed.", - net['id'], n_dhcp_subnets) + LOG.error("ERROR: Network %s has %s dhcp subnets. 
Only 1 is " + "allowed.", net['id'], n_dhcp_subnets) # Subnets overlapping with the transit network for subnet in subnets: @@ -123,7 +161,7 @@ def validate_config_for_migration(resource, event, trigger, **kwargs): if (netaddr.IPSet(subnet_net) & netaddr.IPSet(transit_networks)): n_errors = n_errors + 1 - LOG.error("Subnet %s overlaps with the transit " + LOG.error("ERROR: Subnet %s overlaps with the transit " "network ips: %s.", subnet['id'], transit_networks) @@ -132,8 +170,8 @@ def validate_config_for_migration(resource, event, trigger, **kwargs): admin_context, net['id']) if len(intf_ports) > 1: n_errors = n_errors + 1 - LOG.error("Network %s has interfaces on multiple routers. " - "Only 1 is allowed.", net['id']) + LOG.error("ERROR: Network %s has interfaces on multiple " + "routers. Only 1 is allowed.", net['id']) # Routers validations: routers = plugin.get_routers(admin_context) @@ -149,46 +187,96 @@ def validate_config_for_migration(resource, event, trigger, **kwargs): if gw_ip_set & if_ip_set: n_errors = n_errors + 1 - LOG.error("Interface network of router %s cannot overlap with " - "router GW network", router['id']) - - # TODO(asarfaty): missing validations: - # - Vlan provider network with the same VLAN tag as the uplink - # profile tag used in the relevant transport node - # (cannot check this without access to the T manager) + LOG.error("ERROR: Interface network of router %s cannot " + "overlap with router GW network", router['id']) # Octavia loadbalancers validation: filters = {'device_owner': [nl_constants.DEVICE_OWNER_LOADBALANCERV2, oct_const.DEVICE_OWNER_OCTAVIA]} lb_ports = plugin.get_ports(admin_context, filters=filters) for port in lb_ports: + lb_id = port.get('device_id') fixed_ips = port.get('fixed_ips', []) if fixed_ips: subnet_id = fixed_ips[0]['subnet_id'] network = lb_utils.get_network_from_subnet( admin_context, plugin, subnet_id) - router_id = lb_utils.get_router_from_network( + lb_router_id = _get_router_from_network( admin_context, 
plugin, subnet_id) # Loadbalancer vip subnet must be connected to a router or # belong to an external network - if (not router_id and network and + if (not lb_router_id and network and not network.get('router:external')): n_errors = n_errors + 1 - LOG.error("Loadbalancer %s subnet %s is not external " - "nor connected to a router.", + LOG.error("ERROR: Loadbalancer %s subnet %s is not " + "external nor connected to a router.", port.get('device_id'), subnet_id) - # TODO(asarfaty): Multiple listeners on the same pool is not - # supported, but currently the admin utility has no access to this - # information from octavia + if not lb_id: + continue - # TODO(asarfaty): Member on external subnet must have fip as ip, - # but currently the admin utility has no access to this information - # from octavia + lb_id = lb_id[3:] + lb_binding = nsxv_db.get_nsxv_lbaas_loadbalancer_binding( + admin_context.session, lb_id) + if not lb_binding or not lb_binding['edge_id']: + LOG.warning("Cannot find edge for Loadbalancer %s", lb_id) + continue + edge_id = lb_binding['edge_id'] - # TODO(asarfaty): Load Balancer with members from various subnets - # not uplinked to the same edge router) - the api_replay will work - # but the member will not have connectivity + # Multiple listeners on the same pool is not supported + result = plugin.nsx_v.vcns.get_vips(edge_id) + if len(result) == 2: + edge_vs = result[1] + pools = [] + for vip in edge_vs.get('virtualServer', []): + if not vip.get('defaultPoolId'): + continue + if vip['defaultPoolId'] in pools: + LOG.error("ERROR: Found multiple listeners using the " + "same default pool with loadbalancer %s. " + "This is not supported.", lb_id) + n_errors = n_errors + 1 + break + pools.append(vip['defaultPoolId']) + + # Cannot support LB with members from various subnets not uplinked + # to the same edge router. 
This can be indicated by multiple + # internal interfaces on the LB edge + is_old_lb = lb_common.is_lb_on_router_edge( + admin_context, plugin, edge_id) + if not is_old_lb: + filters = {'device_id': [lb_id], + 'device_owner': [lb_common.LBAAS_DEVICE_OWNER]} + lb_ports = plugin.get_ports(admin_context, filters=filters) + # get the subnets of those ports + lb_subnets = list(set([port['fixed_ips'][0]['subnet_id'] + for port in lb_ports])) + # make sure all subnets are connected to the same router + lb_routers = [lb_router_id] + for sub_id in lb_subnets: + router_id = _get_router_from_network( + admin_context, plugin, sub_id) + if not router_id: + LOG.error("ERROR: Found member of subnet %s not " + "uplinked to any router on loadbalancer " + "%s. This is not supported.", + sub_id, lb_id) + n_errors = n_errors + 1 + elif router_id not in lb_routers: + lb_routers.append(router_id) + if len(lb_routers) > 1: + LOG.error("ERROR: Found members uplinked to different " + "routers on loadbalancer %s. This is not " + "supported.", lb_id) + n_errors = n_errors + 1 + break + + # L2GW is not supported with the policy plugin + l2gws = admin_context.session.query(l2gateway_models.L2Gateway).all() + if len(l2gws): + LOG.error("ERROR: Found %s L2Gws. Networking-l2gw is not supported.", + len(l2gws)) + n_errors = n_errors + 1 if n_errors > 0: plural = n_errors > 1