Fix remaining typos in comments and tests
Change-Id: I872422cffd1f9a2e59b5e18a86695e5cb6edc2cd
parent ac0f247352
commit 8ab5ee1d17
@@ -349,7 +349,7 @@ the default set of quotas are enforced for all projects, so no

.. note::

-Listing defualt quotas with the OpenStack command line client will
+Listing default quotas with the OpenStack command line client will
provide all quotas for networking and other services. Previously,
the :command:`neutron quota-show --tenant_id` would list only networking
quotas.

@@ -61,7 +61,7 @@ In the .gitignore files, add patterns to exclude files created by tools
integrated, such as test frameworks from the project's recommended workflow,
rendered documentation and package builds.

-Don't add patterns to exculde files created by preferred personal like for
+Don't add patterns to exclude files created by preferred personal like for
example editors, IDEs or operating system.
These should instead be maintained outside the repository, for example in a
~/.gitignore file added with::

@@ -157,7 +157,7 @@ The workflow for the life an RFE in Launchpad is as follows:
* In either case (a spec being required or not), once the discussion has
happened and there is positive consensus on the RFE, the report is 'approved',
and its tag will move from `rfe-triaged` to `rfe-approved`.
-* An RFE can be occasionaly marked as 'rfe-postponed' if the team identifies
+* An RFE can be occasionally marked as 'rfe-postponed' if the team identifies
a dependency between the proposed RFE and other pending tasks that prevent
the RFE from being worked on immediately.
* Once an RFE is approved, it needs volunteers. Approved RFEs that do not have an
@@ -630,7 +630,7 @@ class OVSBridge(BaseOVS):
options['local_ip'] = local_ip
options['in_key'] = 'flow'
options['out_key'] = 'flow'
-# NOTE(moshele): pkt_mark is not upported when using ovs hw-offload,
+# NOTE(moshele): pkt_mark is not supported when using ovs hw-offload,
# therefore avoid clear mark on encapsulating packets when it's
# enabled
if not self.is_hw_offload_enabled:

@@ -53,7 +53,7 @@ class DHCPIPv4Responder(dhcp_base.DHCPResponderBase):
default_route = self.get_bin_route(constants.IPv4_ANY, gateway)
bin_routes += default_route

-# For some VMs they may need the metadate IP's route, we move
+# For some VMs they may need the metadata IP's route, we move
# the destination to gateway IP.
if gateway:
meta_route = self.get_bin_route(

@@ -44,7 +44,7 @@ class FdbPopulationAgentExtension(
http://events.linuxfoundation.org/sites/events/files/slides/LinuxConJapan2014_makita_0.pdf
"""

-# FDB udpates are triggered for ports with a certain device_owner only:
+# FDB updates are triggered for ports with a certain device_owner only:
# - device owner "compute": updates the FDB with normal port instances,
# required in order to enable communication between
# SR-IOV direct port instances and normal port instance.

@@ -69,7 +69,7 @@ class DvrEdgeHaRouter(dvr_edge_router.DvrEdgeRouter,
prefix=router_info.INTERNAL_DEV_PREFIX)
self._clear_vips(interface_name)
# NOTE(slaweq): qr- interface is not in ha_namespace but in qrouter
-# namespace in case of dvr ha ruter
+# namespace in case of dvr ha router
self._disable_ipv6_addressing_on_interface(
interface_name, namespace=self.ns_name)
for ip_cidr in ip_cidrs:
@@ -162,7 +162,7 @@ class FipNamespace(namespaces.Namespace):
'FIP namespace failed')

def _create_gateway_port(self, ex_gw_port, interface_name):
-"""Create namespace, request port creationg from Plugin,
+"""Create namespace, request port creation from Plugin,
then configure Floating IP gateway port.
"""
self.create()

@@ -442,7 +442,7 @@ class FipNamespace(namespaces.Namespace):
self._add_cidr_to_device(rtr_2_fip_dev, str(rtr_2_fip))
self._add_cidr_to_device(fip_2_rtr_dev, str(fip_2_rtr))

-# Add permanant ARP entries on each side of veth pair
+# Add permanent ARP entries on each side of veth pair
rtr_2_fip_dev.neigh.add(common_utils.cidr_to_ip(fip_2_rtr),
fip_2_rtr_dev.link.address)
fip_2_rtr_dev.neigh.add(common_utils.cidr_to_ip(rtr_2_fip),

@@ -351,7 +351,7 @@ class FipQosAgentExtension(qos_base.L3QosAgentExtensionBase,
# node), because floating IP qos rates may have been
# processed in dvr snat-namespace, so here the cache was
# already set. We just install the rules to the device in
-# qrouter-namesapce.
+# qrouter-namespace.
self.process_ip_rates(
fip_res, dvr_fip_device, rates, with_cache=False)

@@ -72,6 +72,6 @@ class L3AgentExtensionAPI(object):

def register_router(self, features, router_cls):
"""Register router class with the given features. This is for the
-plugin to ovrride with their own ``router_info`` class.
+plugin to override with their own ``router_info`` class.
"""
self._router_factory.register(features, router_cls)
@@ -679,7 +679,7 @@ class Dnsmasq(DhcpLocalProcess):

In dnsmasq it is not possible to configure two dhcp-host
entries mapped to a single client mac address with IP
-addresses in the same subnet. When recieving a requst
+addresses in the same subnet. When receiving a request
dnsmasq will match on the first entry in it's config,
and lease that address. The second entry will never be
used.

@@ -274,9 +274,9 @@ class ConjIdMap(object):

def __init__(self, int_br):
self.id_map = collections.defaultdict(self._conj_id_factory)
-# Stores the set of conjuntion IDs used for each unique tuple
+# Stores the set of conjunction IDs used for each unique tuple
# (sg_id, remote_id, direction, ethertype). Each tuple
-# can have up to 8 conjuntion IDs (see ConjIPFlowManager.add()).
+# can have up to 8 conjunction IDs (see ConjIPFlowManager.add()).
self.id_map_group = collections.defaultdict(set)
self.id_free = collections.deque()
self._max_id = self._init_max_id(int_br)

@@ -302,7 +302,7 @@ class TcCommand(ip_lib.IPDevice):
def _add_policy_filter(self, bw_limit, burst_limit,
qdisc_id=INGRESS_QDISC_ID):
# NOTE(slaweq): it is made in exactly same way how openvswitch is doing
-# it when configuing ingress traffic limit on port. It can be found in
+# it when configuring ingress traffic limit on port. It can be found in
# lib/netdev-linux.c#L4698 in openvswitch sources:
add_tc_filter_policy(self.name, qdisc_id, bw_limit, burst_limit,
MAX_MTU_VALUE, 'drop', priority=49)

@@ -118,7 +118,7 @@ class RemoteResourceCache(object):
If the attribute on the object is a list, each value is checked if it
is in the list.

-The values in the dicionary for a single key are matched in an OR
+The values in the dictionary for a single key are matched in an OR
fashion.
"""
self._flood_cache_for_query(rtype, **filters)
@@ -62,8 +62,8 @@ def main():
"""Main method for cleaning up empty linux bridges.

This tool deletes every empty linux bridge managed by linuxbridge agent
-(brq.* linux bridges) except thes ones defined using bridge_mappings option
-in section LINUX_BRIDGE (created by deployers).
+(brq.* linux bridges) except these ones defined using bridge_mappings
+option in section LINUX_BRIDGE (created by deployers).

This tool should not be called during an instance create, migrate, etc. as
it can delete a linux bridge about to be used by nova.

@@ -45,7 +45,7 @@ def get_connection():
by domain name or domain ID. If none of the domain selection variables are
set the tool will default to use the domain with literal ID of 'default'.

-To select domain by name set both of these envornment variables:
+To select domain by name set both of these environment variables:

OS_USER_DOMAIN_NAME Name of domain to authenticate to
OS_PROJECT_DOMAIN_NAME Name of domain for authorization

@@ -61,7 +61,7 @@ class OVNMechanismDriver(mech_driver.OVNMechanismDriver):
def _clean_hash_ring(self):
"""Don't clean the hash ring.

-If this method was not overriden, cleanup would be performed when
+If this method was not overridden, cleanup would be performed when
calling the db sync and running neutron server would lose all the nodes
from the ring.
"""

@@ -132,7 +132,7 @@ class AgentNotifierApi(object):
pass

def security_groups_provider_updated(self, context,
-devices_to_udpate=None):
+devices_to_update=None):
pass
@@ -23,7 +23,7 @@ class BaseChecks(object, metaclass=abc.ABCMeta):
neutron-status CLI tool should inherit from this class.

Each check method have to accept neutron.cmd.status.Checker
-class as an argument because all checkes will be run in context of
+class as an argument because all checks will be run in context of
this class.
"""

@@ -312,10 +312,10 @@ class CoreChecks(base.BaseChecks):
def nic_switch_agent_min_kernel_check(checker):
# TODO(adrianc): This was introduced in U release, consider removing
# in 1-2 cycles.
-# Background: Issue with old kernel is appernet in CentOS 7 and older.
+# Background: Issue with old kernel is apparent in CentOS 7 and older.
# U release is the first release that moves from CentOS-7 to CentOS-8,
# this was added as a "heads-up" for operators to make sure min kernel
-# requirement is fullfiled.
+# requirement is fulfilled.
if not cfg.CONF.database.connection:
return upgradecheck.Result(
upgradecheck.Code.WARNING,

@@ -65,7 +65,7 @@ RPC_RES_PROCESSING_STEP = 20
# Do not move this constant to neutron-lib, since it is temporary
IPTABLES_RANDOM_FULLY_VERSION = '1.6.0'

-# Segmentation ID pool; DB select limit to improve the performace.
+# Segmentation ID pool; DB select limit to improve the performance.
IDPOOL_SELECT_SIZE = 100

# Ports with the following 'device_owner' values will not prevent

@@ -25,7 +25,7 @@ rules = [
# NOTE: it can't be ADMIN_OR_PROJECT_READER constant from the base
# module because that is using "project_id" in the check string and the
# service_provider resource don't belongs to any project thus such
-# check string would fail enforcment.
+# check string would fail enforcement.
check_str='role:reader',
description='Get service providers',
operations=[
@@ -86,7 +86,7 @@ def is_port_bound(port):
class DVRResourceOperationHandler(object):
"""Contains callbacks for DVR operations.

-This can be implemented as a mixin or can be intantiated as a stand-alone
+This can be implemented as a mixin or can be instantiated as a stand-alone
object. Either way, it will subscribe itself to the relevant L3 events and
use the plugin directory to find the L3 plugin to make calls to it as
necessary.

@@ -351,7 +351,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):

@db_api.CONTEXT_READER
def _get_dvr_hosts_for_subnets(self, context, subnet_ids):
-"""Get a list of hosts with DVR servicable ports on subnet_ids."""
+"""Get a list of hosts with DVR serviceable ports on subnet_ids."""
host_dvr_dhcp = cfg.CONF.host_dvr_for_dhcp
Binding = ml2_models.PortBinding
Port = models_v2.Port

@@ -92,7 +92,7 @@ class L3_gw_ip_qos_dbonly_mixin(l3_gwmode_db.L3_NAT_dbonly_mixin):
if not info or qos_consts.QOS_POLICY_ID not in info:
# An explicit 'None' for `qos_policy_id` indicates to clear
# the router gateway IP policy. So if info does not have
-# the key `qos_polcy_id`, we can not decide what behavior
+# the key `qos_policy_id`, we can not decide what behavior
# to be done, then directly return here.
return

@@ -38,7 +38,7 @@ class HasInUse(object):
"in_use". Writing any value on this DB parameter will lock the container
register. At the end of the DB transaction, the DB engine will check if
this register was modified or deleted. In such case, the transaction will
-fail and won't be commited.
+fail and won't be committed.

"lock_register" is the method to write the register "in_use" column.
Because the lifespan of this DB lock is the DB transaction, there isn't an

@@ -211,7 +211,7 @@ class DbQuotaDriver(nlib_quota_api.QuotaDriverAPI):
requested_resources = deltas.keys()
with db_api.CONTEXT_WRITER.using(context):
# "get_project_quotas" needs in input a dictionary mapping resource
-# name to BaseResosurce instances so that the default quota can be
+# name to BaseResource instances so that the default quota can be
# retrieved
current_limits = self.get_project_quotas(
context, resources, project_id)
@@ -127,7 +127,7 @@ def check_assertisinstance(logical_line, filename):

@core.flake8ext
def check_assertequal_for_httpcode(logical_line, filename):
-"""N332 - Enforce correct oredering for httpcode in assertEqual."""
+"""N332 - Enforce correct ordering for httpcode in assertEqual."""
msg = ("N332: Use assertEqual(expected_http_code, observed_http_code) "
"instead of assertEqual(observed_http_code, expected_http_code)")
if 'neutron/tests/' in filename:

@@ -131,7 +131,7 @@ class Subnet(object, metaclass=abc.ABCMeta):

A subnet would typically be associated with a network but may not be. It
could represent a dynamically routed IP address space in which case the
-normal network and broadcast addresses would be useable. It should always
+normal network and broadcast addresses would be usable. It should always
be a routable block of addresses and representable in CIDR notation.
"""

@@ -45,7 +45,7 @@ class IpamSubnetManager(object):
associates it with its neutron identifier, if specified.

:param context: neutron api request context
-:returns: the idenfier of created IPAM subnet
+:returns: the identifier of created IPAM subnet
"""
if not self._ipam_subnet_id:
self._ipam_subnet_id = uuidutils.generate_uuid()

@@ -229,7 +229,7 @@ class IpamSubnetGroup(driver.SubnetGroup):
to try to allocate an IP from each subnet in turn, one by one. This
implementation preserves that behavior so that existing drivers work
as they did before while giving them the opportunity to optimize it
-by overridding the implementation.
+by overriding the implementation.
'''
for subnet_id in self._subnet_ids:
try:

@@ -200,7 +200,7 @@ class AgentMechanismDriverBase(api.MechanismDriver, metaclass=abc.ABCMeta):
:param context: PortContext instance describing the port
:returns: True for responsible, False for not responsible

-An agent based mechanism driver is reponsible for a resource provider
+An agent based mechanism driver is responsible for a resource provider
if an agent of it is responsible for that resource provider. An agent
reports responsibility by including the resource provider in the
configurations field of the agent heartbeat.
@@ -41,7 +41,7 @@ TUNNEL_NETWORK_TYPES = [p_const.TYPE_GRE, p_const.TYPE_VXLAN,

LOCAL_SWITCHING = 0

-# The pyhsical network types of support DVR router
+# The physical network types of support DVR router
DVR_PHYSICAL_NETWORK_TYPES = [p_const.TYPE_VLAN, p_const.TYPE_FLAT]

# Various tables for DVR use of integration bridge flows

@@ -1701,7 +1701,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
if updated_ports is None:
updated_ports = set()

-# if a port was added and then removed or viceversa since the agent
+# if a port was added and then removed or vice-versa since the agent
# can't know the order of the operations, check the status of the port
# to determine if the port was added or deleted
added_ports = {p['name'] for p in events['added']}

@@ -1129,7 +1129,7 @@ class OVNMechanismDriver(api.MechanismDriver):
# NOTE(lucasagomes): Router ports in OVN is never bound
# to a host given their decentralized nature. By calling
# provisioning_complete() - as above - don't do it for us
-# becasue the router ports are unbind so, for OVN we are
+# because the router ports are unbind so, for OVN we are
# forcing the status here. Maybe it's something that we can
# change in core Neutron in the future.
db_port = ml2_db.get_port(admin_context, port_id)
@@ -337,8 +337,8 @@ class OVNClient(object):
port_security, new_macs = (
self._get_allowed_addresses_from_port(port))
# TODO(egarciar): OVN supports MAC learning from v21.03. This
-# if-else block is stated so as to keep compability with older OVN
-# versions and should be removed in the future.
+# if-else block is stated so as to keep compatibility with older
+# OVN versions and should be removed in the future.
if self._sb_idl.is_table_present('FDB'):
if (port_security or port_type or dhcpv4_options or
dhcpv6_options):

@@ -1005,7 +1005,7 @@ class OVNClient(object):
'NAT', nat.uuid, 'logical_port')])
else:
# NOTE(mjozefcz): The FIP from LB VIP is
-# dissassociated now. We can decentralize
+# disassociated now. We can decentralize
# member FIPs now.
LOG.warning(
"Port %s is configured as a member "

@@ -2445,7 +2445,7 @@ class OVNClient(object):
port_dns_records[fqdn] = dns_assignment['ip_address']
else:
port_dns_records[fqdn] += " " + dns_assignment['ip_address']
-# Add reverse DNS enteries for port only for fqdn
+# Add reverse DNS entries for port only for fqdn
for ip in port_dns_records[fqdn].split(" "):
ptr_record = netaddr.IPAddress(ip).reverse_dns.rstrip(".")
port_dns_records[ptr_record] = fqdn

@@ -1003,7 +1003,7 @@ class OvnNbSynchronizer(OvnDbSynchronizer):
{'network': db_network,
'segment': db_segment,
'lswitch': lswitch['name']})
-# Delete orhpaned provnet ports
+# Delete orphaned provnet ports
for provnet_port in lswitch['provnet_ports']:
if provnet_port in segments_provnet_port_names:
continue
@@ -624,7 +624,7 @@ class Ml2OvnIdlBase(connection.OvsdbIdl):

def set_table_condition(self, table_name, condition):
# Prior to ovs commit 46d44cf3be0, self.cond_change() doesn't work here
-# but after that commit, setting table.condtion doesn't work.
+# but after that commit, setting table.condition doesn't work.
if hasattr(ovs_idl_mod, 'ConditionState'):
self.cond_change(table_name, condition)
else:

@@ -2530,7 +2530,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
for network_segment in network_segments
# A segment popped from a network could have its
# segmentation_id set to None if the segment
-# beeing created is partial.
+# being created is partial.
if not ((network_segment[api.SEGMENTATION_ID] ==
changed_segment[api.SEGMENTATION_ID] or
network_segment[api.SEGMENTATION_ID] is None) and

@@ -551,7 +551,7 @@ def remove_netns(name, **kwargs):
def list_netns(**kwargs):
"""List network namespaces.

-Caller requires raised priveleges to list namespaces
+Caller requires raised privileges to list namespaces
"""
return netns.listnetns(**kwargs)

@@ -639,7 +639,7 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver):

# NOTE(bno1): Sometimes a router is added before its namespaces are
# created. The metering agent has to periodically check if the
-# namespaces for the missing iptables managers have appearead and
+# namespaces for the missing iptables managers have appeared and
# create the managers for them. When a new manager is created, the
# metering rules have to be added to it.
if rm.create_iptables_managers():

@@ -329,7 +329,7 @@ class NDPProxyPlugin(l3_ndp_proxy.NDPProxyBase):
internal_address_scope = self._check_port(
context, port_dict, ndp_proxy, router_ports)
# If the external network and internal network not belong to same
-# adddress scope, the packets can't be forwarded by route. So, in
+# address scope, the packets can't be forwarded by route. So, in
# this case we should forbid to create ndp proxy entry.
if ext_address_scope != internal_address_scope:
raise exc.AddressScopeConflict(
@@ -91,7 +91,7 @@ class TestSegmentationId(BaseSegmentationIdTest):
self.safe_client.create_port(self.project_id, network['id'])
# Port failed to bind
self.safe_client.create_port(self.project_id, network['id'],
-"non-exisiting-host")
+"non-existing-host")

self._update_segmentation_id(network)

@@ -210,7 +210,7 @@ class TestTrunkPlugin(base.BaseFullStackTestCase):
# NOTE(slaweq): As is described in bug
# https://bugs.launchpad.net/neutron/+bug/1687709 when more than one
# different ovs-agent with enabled trunk driver is running at a time it
-# might lead to race contitions between them.
+# might lead to race conditions between them.
# Because of that ovs_agent used for fullstack tests is monkeypatched and
# loads trunk driver only if trunk service plugin is enabled.
# That makes restriction that only a single set of tests with trunk-enabled

@@ -100,7 +100,7 @@ class L3AgentNDPProxyTestFramework(framework.L3AgentTestFramework):
res = ip_wrapper.netns.execute(cmd)
proxies = []
for proxy in res.split('\n'):
-# Exclute null line
+# Exclude null line
if proxy:
proxy_list = proxy.split(' ')
if interface_name in proxy_list:

@@ -341,7 +341,7 @@ class TestOVNFunctionalBase(test_plugin.Ml2PluginV2TestCase,

# NOTE(mjozefcz): We can find occasional functional test
# failures because of low timeout value - set it to 30
-# seconds, should be enought. More info: 1868110
+# seconds, should be enough. More info: 1868110
cfg.CONF.set_override(
'ovsdb_connection_timeout', 30,
'ovn')

@@ -39,7 +39,7 @@ class StatusTest(base.BaseLoggingTestCase):
| api_workers and rpc_workers. |
+----------------------------------------------------------------+

-Error codes which migh be returned by this command:
+Error codes which might be returned by this command:
- Code.SUCCESS,
- Code.WARNING,
- Code.FAILURE
@@ -127,7 +127,7 @@ class IpamTestCase(testlib_api.SqlTestCase):
self.assert_ip_alloc_matches(ip_alloc_expected)
self.assert_ip_alloc_pool_matches(ip_alloc_pool_expected)

-def test_allocate_ip_exausted_pool(self):
+def test_allocate_ip_exhausted_pool(self):
# available from .2 up to .6 -> 5
for i in range(1, 6):
self._create_port(uuidutils.generate_uuid())

@@ -1045,7 +1045,7 @@ class TestOvnNbSync(base.TestOVNFunctionalBase):
self.assertCountEqual(expected_dhcpv6_options_ports_ids,
monitor_lport_ids_dhcpv6_enabled)

-# Check if unknow address is set for the expected lports.
+# Check if unknown address is set for the expected lports.
for row in (
self.nb_api.tables['Logical_Switch_Port'].rows.values()):
if row.name in self.expected_ports_with_unknown_addr:

@@ -396,7 +396,7 @@ class TestRouter(base.TestOVNFunctionalBase):
def fake_select(*args, **kwargs):
self.assertCountEqual(self.candidates, kwargs['candidates'])
# We are not interested in further processing, let us return
-# INVALID_CHASSIS to avoid erros
+# INVALID_CHASSIS to avoid errors
return [ovn_const.OVN_GATEWAY_INVALID_CHASSIS]

with mock.patch.object(ovn_client._ovn_scheduler, 'select',

@@ -158,7 +158,7 @@ class ConntrackHelperExtensionTestCase(ConntrackHelperExtensionBaseTestCase):

@mock.patch.object(iptables_manager.IptablesTable, 'add_rule')
@mock.patch.object(iptables_manager.IptablesTable, 'add_chain')
-def test_update_roter(self, mock_add_chain, mock_add_rule):
+def test_update_router(self, mock_add_chain, mock_add_rule):
self.cth_ext.add_router(self.context, self.router)
mock_add_chain.reset_mock()
mock_add_rule.reset_mock()
@@ -263,10 +263,10 @@ class NDPProxyExtensionDVRTestCase(
self.assertEqual(expected_calls, self.delete_route.mock_calls)
proxy_cmd = ['ip', '-6', 'neigh', 'del',
'proxy', '2002::1:5', 'dev', self.agent_ext_dvice]
-expceted_calls = [
+expected_calls = [
mock.call(namespace=self.namespace),
mock.call().netns.execute(proxy_cmd, privsep_exec=True)]
-self.assertEqual(expceted_calls, self.ip_wrapper.mock_calls)
+self.assertEqual(expected_calls, self.ip_wrapper.mock_calls)


class NDPProxyExtensionLegacyDVRNoExternalTestCaseBase(

@@ -4150,7 +4150,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework):
router = l3_test_common.prepare_router_data(enable_ha=True)
router[lib_constants.HA_INTERFACE_KEY] = None
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
-# an early failure of an HA router initiailization shouldn't try
+# an early failure of an HA router initialization shouldn't try
# and cleanup a state change monitor process that was never spawned.
# Cannot use self.assertRaises(Exception, ...) as that causes an H202
# pep8 failure.

@@ -1345,13 +1345,13 @@ class NotificationTest(APIv2TestBase):

self.assertEqual(expected_code, res.status_int)

-def test_network_create_notifer(self):
+def test_network_create_notifier(self):
self._resource_op_notifier('create', 'network')

-def test_network_delete_notifer(self):
+def test_network_delete_notifier(self):
self._resource_op_notifier('delete', 'network')

-def test_network_update_notifer(self):
+def test_network_update_notifier(self):
self._resource_op_notifier('update', 'network')
@@ -21,7 +21,7 @@ from neutron.tests import base
class TestNeutronOVNDBSyncUtil(base.BaseTestCase):

def test_setup_conf(self):
-# the code under test will fail because of the cfg.conf alredy being
+# the code under test will fail because of the cfg.conf already being
# initialized by the BaseTestCase setUp method. Reset.
cfg.CONF.reset()
util.setup_conf()

@@ -107,7 +107,7 @@ class ProjectAdminTests(AutoAllocatedTopologyAPITestCase):
)

def test_delete_topology(self):
-# Project members can delete topolies inside project, but not outside
+# Project members can delete topologies inside project, but not outside
# project
self.assertTrue(
policy.enforce(self.context, DELETE_POLICY, self.target)

@@ -6800,7 +6800,7 @@ class DbModelMixin(object):
In this test we start an update of the name on a model in an eventlet
coroutine where it will be blocked before it can commit the results.
Then while it is blocked, we will update the description of the model
-in the foregound and ensure that this results in the coroutine
+in the foreground and ensure that this results in the coroutine
receiving a StaleDataError as expected.
"""
lock = functools.partial(lockutils.lock, uuidutils.generate_uuid())

@@ -379,7 +379,7 @@ class TestL3_NAT_dbonly_mixin(

def test__create_gw_port(self):
# NOTE(slaweq): this test is probably wrong
-# returing dict as gw_port breaks test later in L334 in
+# returning dict as gw_port breaks test later in L334 in
# neutron.db.l3_db file
router_id = '2afb8434-7380-43a2-913f-ba3a5ad5f349'
router = l3_models.Router(id=router_id)
@@ -132,7 +132,7 @@ class TestHashRing(testlib_api.SqlTestCaseLight):
self.assertEqual(4, len(ovn_hash_ring_db.get_active_nodes(
self.admin_ctx, interval=60, group_name=HASH_RING_TEST_GROUP)))

-# Substract 60 seconds from utcnow() and touch the nodes from our host
+# Subtract 60 seconds from utcnow() and touch the nodes from our host
time.sleep(1)
fake_utcnow = timeutils.utcnow() - datetime.timedelta(seconds=60)
with mock.patch.object(timeutils, 'utcnow') as mock_utcnow:

@@ -112,7 +112,7 @@ class TestRevisionNumber(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
except Exception as exc:
if type(exc) is not db_exc.DBDuplicateEntry:
self.fail("create_initial_revision with the same parameters "
-"should have raisen a DBDuplicateEntry exception")
+"should have raised a DBDuplicateEntry exception")

with db_api.CONTEXT_WRITER.using(self.ctx):
args = (self.net['id'], ovn_rn_db.TYPE_NETWORKS)

@@ -2212,7 +2212,7 @@ class GetObjectClassByModelTestCase(UniqueObjectBase):
self.registered_object.db_model)
self.assertIs(self.registered_object, found_obj)

-def test_not_registed_object_raises_exception(self):
+def test_not_registered_object_raises_exception(self):
with testtools.ExpectedException(o_exc.NeutronDbObjectNotFoundByModel):
base.get_object_class_by_model(self.not_registered_object.db_model)

@@ -39,7 +39,7 @@ class FakeAgentMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase):

NOTE(yamamoto): This is a modified copy of ofagent mechanism driver as
of writing this. There's no need to keep this synced with the "real"
-ofagent mechansim driver or its agent.
+ofagent mechanism driver or its agent.
"""

def __init__(self):
@@ -130,7 +130,7 @@ class TestOvnDbNotifyHandler(base.BaseTestCase):

# class TestOvnBaseConnection(base.TestCase):
#
-# Each test is being deleted, but for reviewers sake I wanted to exaplain why:
+# Each test is being deleted, but for reviewers sake I wanted to explain why:
#
# @mock.patch.object(idlutils, 'get_schema_helper')
# def testget_schema_helper_success(self, mock_gsh):

@@ -237,7 +237,7 @@ class TestOVNMechanismDriver(TestOVNMechanismDriverBase):
idl.neutron_pg_drop_event.wait.return_value = True
result = self.mech_driver._create_neutron_pg_drop()
idl.neutron_pg_drop_event.wait.assert_called_once()
-# If sommething else creates the port group, just return
+# If something else creates the port group, just return
self.assertIsNone(result)

@mock.patch.object(ovsdb_monitor.OvnInitPGNbIdl, 'from_server')

@@ -26,7 +26,7 @@ from neutron.tests.unit.plugins.ml2.extensions import test_dns_integration
PROJECT_ID = uuidutils.generate_uuid()


-class DNSDomainKeyworkdsTestCase(
+class DNSDomainKeywordsTestCase(
test_dns_integration.DNSIntegrationTestCase):

_extension_drivers = ['dns_domain_keywords']

@@ -126,7 +126,7 @@ class DNSDomainKeyworkdsTestCase(
current_dns_domain=None, previous_dns_domain=None):
current_dns_domain = current_dns_domain or self._expected_dns_domain
previous_dns_domain = previous_dns_domain or self._expected_dns_domain
-super(DNSDomainKeyworkdsTestCase, self)._verify_port_dns(
+super(DNSDomainKeywordsTestCase, self)._verify_port_dns(
port=port, dns_data_db=dns_data_db, dns_name=dns_name,
dns_domain=dns_domain, ptr_zones=ptr_zones,
delete_records=delete_records, provider_net=provider_net,
@@ -254,7 +254,7 @@ class AutoAllocateTestCase(testlib_api.SqlTestCase):
subnets=provisioning_exception.subnets
)

-def test__build_topology_provisioning_error_no_toplogy(self):
+def test__build_topology_provisioning_error_no_topology(self):
provisioning_exception = exceptions.UnknownProvisioningError(
db_exc.DBError)
self._test__build_topology(