pyupgrade changes for Python 3.9+

As discussed at the Epoxy PTG meeting, run an automated
upgrade tool to make code Python 3.9+ compliant.

This patch is for code (non-tests).

Result of running:

$ pyupgrade --py39-plus $(git ls-files | grep ".py$")

Fixed PEP8 errors introduced by pyupgrade by running:

$ autopep8 --select=E127,E128,E501 --max-line-length 79 -r \
  --in-place neutron

Also did manual updates as necessary to fix other errors
and warnings after above commands.

Inspired by Octavia and Nova [0].

[0] https://review.opendev.org/c/openstack/nova/+/896986

Change-Id: Ife79df75dd2c9c42d4de36edef1d29b98f5d85da
This commit is contained in:
Brian Haley 2024-10-25 13:56:10 -04:00
parent 1b9b99b7db
commit 706a24e298
363 changed files with 1376 additions and 1376 deletions

View File

@ -24,7 +24,7 @@ class AgentExtensionsManager(stevedore.named.NamedExtensionManager):
"""Manage agent extensions."""
def __init__(self, conf, namespace):
super(AgentExtensionsManager, self).__init__(
super().__init__(
namespace, conf.agent.extensions,
invoke_on_load=True, name_order=True)
LOG.info("Loaded agent extensions: %s", self.names())

View File

@ -34,7 +34,7 @@ class AsyncProcessException(Exception):
pass
class AsyncProcess(object):
class AsyncProcess:
"""Manages an asynchronous process.
This class spawns a new process via subprocess and uses

View File

@ -17,7 +17,7 @@ from neutron_lib import rpc as n_rpc
from oslo_messaging import Target
class BasePluginApi(object):
class BasePluginApi:
"""Base agent side of the rpc API"""
def __init__(self, topic, namespace, version):
target = Target(

View File

@ -14,7 +14,7 @@
# under the License.
class BasePollingManager(object):
class BasePollingManager:
def __init__(self):
self._force_polling = False

View File

@ -118,7 +118,7 @@ def get_gre_tunnel_port_type(remote_ip, local_ip):
return p_const.TYPE_GRE
class VifPort(object):
class VifPort:
def __init__(self, port_name, ofport, vif_id, vif_mac, switch):
self.port_name = port_name
self.ofport = ofport
@ -134,7 +134,7 @@ class VifPort(object):
self.switch.br_name)
class BaseOVS(object):
class BaseOVS:
def __init__(self):
self.ovsdb = impl_idl.api_factory()
@ -247,7 +247,7 @@ def version_from_protocol(protocol):
class OVSBridge(BaseOVS):
def __init__(self, br_name,
datapath_type=ovs_constants.OVS_DATAPATH_SYSTEM):
super(OVSBridge, self).__init__()
super().__init__()
self.br_name = br_name
self.datapath_type = datapath_type
self._default_cookie = generate_random_cookie()
@ -336,7 +336,7 @@ class OVSBridge(BaseOVS):
# IGMP Neutron configs are more value consistent using True to
# enable a feature and False to disable it.
flood_value = ('false' if
cfg.CONF.OVS.igmp_flood_unregistered else 'true')
cfg.CONF.OVS.igmp_flood_unregistered else 'true')
other_config = {
'mcast-snooping-disable-flood-unregistered': flood_value}
with self.ovsdb.transaction() as txn:
@ -1317,7 +1317,7 @@ class OVSBridge(BaseOVS):
self.destroy()
class DeferredOVSBridge(object):
class DeferredOVSBridge:
'''Deferred OVSBridge.
This class wraps add_flow, mod_flow and delete_flows calls to an OVSBridge
@ -1346,7 +1346,7 @@ class DeferredOVSBridge(object):
self.full_ordered = full_ordered
self.order = order
if not self.full_ordered:
self.weights = dict((y, x) for x, y in enumerate(self.order))
self.weights = {y: x for x, y in enumerate(self.order)}
self.action_flow_tuples = []
self.use_bundle = use_bundle
@ -1422,7 +1422,7 @@ def _build_flow_expr_str(flow_dict, cmd, strict):
if key == 'proto':
flow_expr_arr.append(value)
else:
flow_expr_arr.append("%s=%s" % (key, str(value)))
flow_expr_arr.append("{}={}".format(key, str(value)))
if actions:
flow_expr_arr.append(actions)

View File

@ -50,10 +50,10 @@ class OvsdbMonitor(async_process.AsyncProcess):
cmd.append(','.join(columns))
if format:
cmd.append('--format=%s' % format)
super(OvsdbMonitor, self).__init__(cmd, run_as_root=run_as_root,
respawn_interval=respawn_interval,
log_output=True,
die_on_error=False)
super().__init__(cmd, run_as_root=run_as_root,
respawn_interval=respawn_interval,
log_output=True,
die_on_error=False)
self.new_events = {'added': [], 'removed': [], 'modified': []}
def get_events(self):
@ -63,7 +63,7 @@ class OvsdbMonitor(async_process.AsyncProcess):
return events
def start(self, block=False, timeout=60):
super(OvsdbMonitor, self).start()
super().start()
if block:
utils.wait_until_true(self.is_active, timeout=timeout)
@ -80,7 +80,7 @@ class SimpleInterfaceMonitor(OvsdbMonitor):
bridge_names=None, ovs=None):
self._bridge_names = bridge_names or []
self._ovs = ovs
super(SimpleInterfaceMonitor, self).__init__(
super().__init__(
'Interface',
columns=['name', 'ofport', 'external_ids'],
format='json',

View File

@ -24,7 +24,7 @@ from neutron.common import _constants as n_const
LOG = logging.getLogger(__name__)
class DeferredCall(object):
class DeferredCall:
'''Store a callable for later calling.
This is hardly more than a parameterless lambda, but this way it's much
@ -37,17 +37,17 @@ class DeferredCall(object):
self.kwargs = kwargs
def __str__(self):
return '%s(%s)' % (
return '{}({})'.format(
self.func.__name__,
', '.join([repr(x) for x in self.args] +
['%s=%s' % (k, repr(v))
['{}={}'.format(k, repr(v))
for k, v in self.kwargs.items()]))
def execute(self):
return self.func(*self.args, **self.kwargs)
class PlacementState(object):
class PlacementState:
'''Represents the desired state of the Placement DB.
This represents the state of one Neutron agent
@ -143,7 +143,8 @@ class PlacementState(object):
# we must create an agent RP under each hypervisor RP.
rps = []
for hypervisor in self._hypervisor_rps.values():
agent_rp_name = '%s:%s' % (hypervisor['name'], self._agent_type)
agent_rp_name = '{}:{}'.format(
hypervisor['name'], self._agent_type)
agent_rp_uuid = place_utils.agent_resource_provider_uuid(
self._driver_uuid_namespace, hypervisor['name'])
rps.append(
@ -159,7 +160,7 @@ class PlacementState(object):
rps = []
for device in self._rp_bandwidths:
hypervisor = self._hypervisor_rps[device]
rp_name = '%s:%s:%s' % (
rp_name = '{}:{}:{}'.format(
hypervisor['name'], self._agent_type, device)
rp_uuid = place_utils.device_resource_provider_uuid(
self._driver_uuid_namespace,
@ -192,7 +193,7 @@ class PlacementState(object):
# Remove hypervisor duplicates to avoid calling placement API multiple
# times for the same hypervisor.
hypervisors = set(h['name'] for h in self._hypervisor_rps.values())
hypervisors = {h['name'] for h in self._hypervisor_rps.values()}
for hypervisor in hypervisors:
agent_rp_uuid = place_utils.agent_resource_provider_uuid(
self._driver_uuid_namespace, hypervisor)

View File

@ -54,7 +54,7 @@ class InterfacePollingMinimizer(base_polling.BasePollingManager):
ovsdb_monitor_respawn_interval=ovs_const.DEFAULT_OVSDBMON_RESPAWN,
bridge_names=None, ovs=None):
super(InterfacePollingMinimizer, self).__init__()
super().__init__()
self._monitor = ovsdb_monitor.SimpleInterfaceMonitor(
respawn_interval=ovsdb_monitor_respawn_interval,
ovsdb_connection=cfg.CONF.OVS.ovsdb_connection,

View File

@ -21,7 +21,7 @@ from oslo_utils import timeutils
from oslo_utils import uuidutils
class ResourceUpdate(object):
class ResourceUpdate:
"""Encapsulates a resource update
An instance of this object carries the information necessary to prioritize
@ -79,7 +79,7 @@ class ResourceUpdate(object):
return self.tries < 0
class ExclusiveResourceProcessor(object):
class ExclusiveResourceProcessor:
"""Manager for access to a resource for processing
This class controls access to a resource in a non-blocking way. The first
@ -159,7 +159,7 @@ class ExclusiveResourceProcessor(object):
yield update
class ResourceProcessingQueue(object):
class ResourceProcessingQueue:
"""Manager of the queue of resources to process."""
def __init__(self):
self._queue = queue.PriorityQueue()

View File

@ -83,10 +83,10 @@ class DHCPResourceUpdate(queue.ResourceUpdate):
def __lt__(self, other):
if other.obj_type == self.obj_type == 'port':
self_ips = set(str(fixed_ip['ip_address']) for
fixed_ip in self.resource['fixed_ips'])
other_ips = set(str(fixed_ip['ip_address']) for
fixed_ip in other.resource['fixed_ips'])
self_ips = {str(fixed_ip['ip_address']) for
fixed_ip in self.resource['fixed_ips']}
other_ips = {str(fixed_ip['ip_address']) for
fixed_ip in other.resource['fixed_ips']}
if self_ips & other_ips:
return self.timestamp < other.timestamp
@ -105,7 +105,7 @@ class DhcpAgent(manager.Manager):
target = oslo_messaging.Target(version='1.0')
def __init__(self, host=None, conf=None):
super(DhcpAgent, self).__init__(host=host)
super().__init__(host=host)
self.needs_resync_reasons = collections.defaultdict(list)
self.dhcp_ready_ports = set()
self.dhcp_prio_ready_ports = set()
@ -306,7 +306,7 @@ class DhcpAgent(manager.Manager):
active_networks = self.plugin_rpc.get_active_networks_info(
enable_dhcp_filter=False)
LOG.info('All active networks have been fetched through RPC.')
active_network_ids = set(network.id for network in active_networks)
active_network_ids = {network.id for network in active_networks}
for deleted_id in known_network_ids - active_network_ids:
try:
self.disable_dhcp_helper(deleted_id)
@ -919,7 +919,7 @@ class DhcpPluginApi(base_agent_rpc.BasePluginApi):
return [dhcp.NetModel(net) for net in nets]
class NetworkCache(object):
class NetworkCache:
"""Agent cache of the current network state."""
def __init__(self):
self.cache = {}
@ -1050,7 +1050,7 @@ class NetworkCache(object):
class DhcpAgentWithStateReport(DhcpAgent):
def __init__(self, host=None, conf=None):
super(DhcpAgentWithStateReport, self).__init__(host=host, conf=conf)
super().__init__(host=host, conf=conf)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
self.failed_report_state = False
self.agent_state = {

View File

@ -50,7 +50,7 @@ def load_firewall_driver_class(driver):
'neutron.agent.firewall_drivers', driver)
class FirewallDriver(object, metaclass=abc.ABCMeta):
class FirewallDriver(metaclass=abc.ABCMeta):
"""Firewall Driver base class.
Defines methods that any driver providing security groups

View File

@ -41,7 +41,7 @@ IPV6_STR = "v6"
class DHCPResponderBase(base_oskenapp.BaseNeutronAgentOSKenApp):
def __init__(self, agent_api, ext_api, *args, version=IPV4_STR, **kwargs):
super(DHCPResponderBase, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self.agent_api = agent_api
self.int_br = self.agent_api.request_int_br()
self.ext_api = ext_api

View File

@ -28,7 +28,7 @@ from neutron.api.rpc.callbacks import resources
LOG = logging.getLogger(__name__)
class DHCPExtensionPortInfoAPI(object):
class DHCPExtensionPortInfoAPI:
def __init__(self, cache_api):
self.cache_api = cache_api

View File

@ -152,7 +152,7 @@ class DHCPIPv4Responder(dhcp_base.DHCPResponderBase):
if is_ack:
fqdn = 'host-%s' % ip_addr.replace('.', '-').replace(':', '-')
if cfg.CONF.dns_domain:
fqdn = '%s.%s' % (fqdn, cfg.CONF.dns_domain)
fqdn = '{}.{}'.format(fqdn, cfg.CONF.dns_domain)
domain_name_bin = struct.pack('!%ds' % len(fqdn),
bytes(str(fqdn).encode()))
options.option_list.append(

View File

@ -58,9 +58,9 @@ DHCPV6_OPTION_FQDN = 39
class DHCPIPv6Responder(dhcp_base.DHCPResponderBase):
def __init__(self, agent_api, ext_api, *args, **kwargs):
super(DHCPIPv6Responder, self).__init__(agent_api, ext_api,
version=dhcp_base.IPV6_STR,
*args, **kwargs)
super().__init__(agent_api, ext_api,
version=dhcp_base.IPV6_STR,
*args, **kwargs)
def _create_duid(self, mac):
"""Create a DUID based on the mac address and time.
@ -221,7 +221,7 @@ class DHCPIPv6Responder(dhcp_base.DHCPResponderBase):
# 39: Fully Qualified Domain Name
fqdn = 'host-%s' % ip_addr.replace('.', '-').replace(':', '-')
if req_type == 'REQUEST' and cfg.CONF.dns_domain:
fqdn = '%s.%s' % (fqdn, cfg.CONF.dns_domain)
fqdn = '{}.{}'.format(fqdn, cfg.CONF.dns_domain)
# 0000 0... = Reserved: 0x00
# .... .0.. = N bit: Server should perform DNS updates

View File

@ -60,7 +60,7 @@ class FdbPopulationAgentExtension(
constants.DEVICE_OWNER_ROUTER_INTF,
constants.DEVICE_OWNER_DHCP}
class FdbTableTracker(object):
class FdbTableTracker:
"""FDB table tracker is a helper class
intended to keep track of the existing FDB rules.
"""

View File

@ -199,7 +199,7 @@ class LocalIPAgentExtension(l2_extension.L2AgentExtension):
priority=10,
nw_dst=local_ip,
reg6=vlan,
dl_type="0x{:04x}".format(ether_types.ETH_TYPE_IP),
dl_type=f"0x{ether_types.ETH_TYPE_IP:04x}",
actions='mod_dl_dst:{:s},'
'ct(commit,table={:d},zone={:d},nat(dst={:s}))'.format(
mac, ovs_constants.TRANSIENT_TABLE, vlan, dest_ip)
@ -211,7 +211,7 @@ class LocalIPAgentExtension(l2_extension.L2AgentExtension):
nw_src=dest_ip,
reg6=vlan,
ct_state="-trk",
dl_type="0x{:04x}".format(ether_types.ETH_TYPE_IP),
dl_type=f"0x{ether_types.ETH_TYPE_IP:04x}",
actions='ct(table={:d},zone={:d},nat'.format(
ovs_constants.TRANSIENT_TABLE, vlan)
)
@ -225,8 +225,8 @@ class LocalIPAgentExtension(l2_extension.L2AgentExtension):
nw_src=dest_ip,
nw_dst=local_ip,
reg6=vlan,
dl_type="0x{:04x}".format(ether_types.ETH_TYPE_IP),
actions='resubmit(,{:d})'.format(ovs_constants.TRANSIENT_TABLE)
dl_type=f"0x{ether_types.ETH_TYPE_IP:04x}",
actions=f'resubmit(,{ovs_constants.TRANSIENT_TABLE:d})'
)
def delete_local_ip_translation(self, vlan, local_ip, dest_ip, mac):

View File

@ -89,7 +89,7 @@ backend backend_{{ instance.uuid }}_{{ instance.provider_ip }}
""")
class ProxyInstance(object):
class ProxyInstance:
def __init__(self, instance_id, provider_ip, project_id):
self.uuid = instance_id
self.provider_ip = provider_ip
@ -98,7 +98,7 @@ class ProxyInstance(object):
cfg.CONF.METADATA, self.uuid)
class HostMedataHAProxyDaemonMonitor(object):
class HostMedataHAProxyDaemonMonitor:
"""Manage the data and state of a host metadata haproxy process."""
def __init__(self, process_monitor, uuid=None,
@ -114,7 +114,7 @@ class HostMedataHAProxyDaemonMonitor(object):
cfg.CONF.state_path, self._host_id,
'haproxy.conf', True)
buf = io.StringIO()
meta_api = "%s:%s" % (
meta_api = "{}:{}".format(
cfg.CONF.METADATA.nova_metadata_host,
cfg.CONF.METADATA.nova_metadata_port)
@ -138,7 +138,7 @@ class HostMedataHAProxyDaemonMonitor(object):
buf.write('%s' % _HOST_PATH_PROXY_TEMPLATE.render(
log_level='debug',
log_tag="%s-%s" % (PROXY_SERVICE_NAME, self._host_id),
log_tag="{}-{}".format(PROXY_SERVICE_NAME, self._host_id),
user=username,
group=groupname,
maxconn=1024,

View File

@ -31,7 +31,7 @@ from neutron import manager
LOG = logging.getLogger(__name__)
class QosAgentDriver(object, metaclass=abc.ABCMeta):
class QosAgentDriver(metaclass=abc.ABCMeta):
"""Defines stable abstract interface for QoS Agent Driver.
QoS Agent driver defines the interface to be implemented by Agent
@ -111,8 +111,8 @@ class QosAgentDriver(object, metaclass=abc.ABCMeta):
def _handle_rule_delete(self, port, rule_type, ingress=False):
handler_name = "".join(("delete_", rule_type))
if ingress:
handler_name = "%s_%s" % (handler_name,
constants.INGRESS_DIRECTION)
handler_name = "{}_{}".format(handler_name,
constants.INGRESS_DIRECTION)
handler = getattr(self, handler_name)
handler(port)
@ -138,7 +138,7 @@ class QosAgentDriver(object, metaclass=abc.ABCMeta):
return rule_direction == constants.INGRESS_DIRECTION
class PortPolicyMap(object):
class PortPolicyMap:
def __init__(self):
# we cannot use a dict of sets here because port dicts are not hashable
self.qos_policy_ports = collections.defaultdict(dict)

View File

@ -32,8 +32,7 @@ class L2AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager):
"""
def __init__(self, conf):
super(L2AgentExtensionsManager,
self).__init__(conf, L2_AGENT_EXT_MANAGER_NAMESPACE)
super().__init__(conf, L2_AGENT_EXT_MANAGER_NAMESPACE)
def handle_port(self, context, data):
"""Notify all agent extensions to handle port."""

View File

@ -94,7 +94,7 @@ def log_verbose_exc(message, router_payload):
router_payload, indent=5))
class L3PluginApi(object):
class L3PluginApi:
"""Agent side of the l3 agent RPC API.
API version history:
@ -213,7 +213,7 @@ class L3PluginApi(object):
context, 'get_networks', filters=filters, fields=fields)
class RouterFactory(object):
class RouterFactory:
def __init__(self):
self._routers = {}
@ -330,7 +330,7 @@ class L3NATAgent(ha.AgentMixin,
self._pool_size = ROUTER_PROCESS_GREENLET_MIN
self._pool = eventlet.GreenPool(size=self._pool_size)
self._queue = queue.ResourceProcessingQueue()
super(L3NATAgent, self).__init__(host=self.conf.host)
super().__init__(host=self.conf.host)
self.target_ex_net_id = None
self.use_ipv6 = netutils.is_ipv6_enabled()
@ -999,7 +999,7 @@ class L3NATAgent(ha.AgentMixin,
class L3NATAgentWithStateReport(L3NATAgent):
def __init__(self, host, conf=None):
super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf)
super().__init__(host=host, conf=conf)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
self.failed_report_state = False
self.agent_state = {

View File

@ -17,11 +17,11 @@ import weakref
from neutron.agent.l3 import dvr_fip_ns
class AgentMixin(object):
class AgentMixin:
def __init__(self, host):
# dvr data
self._fip_namespaces = weakref.WeakValueDictionary()
super(AgentMixin, self).__init__(host)
super().__init__(host)
def get_fip_ns(self, ext_net_id):
# TODO(Carl) is this necessary? Code that this replaced was careful to

View File

@ -31,8 +31,8 @@ class DvrEdgeHaRouter(dvr_edge_router.DvrEdgeRouter,
"""
def __init__(self, host, *args, **kwargs):
super(DvrEdgeHaRouter, self).__init__(host,
*args, **kwargs)
super().__init__(host,
*args, **kwargs)
self.enable_snat = None
@property
@ -85,7 +85,7 @@ class DvrEdgeHaRouter(dvr_edge_router.DvrEdgeRouter,
self.set_ha_port()
if (self.is_router_primary() and self.ha_port and
self.ha_port['status'] == constants.PORT_STATUS_ACTIVE):
return super(DvrEdgeHaRouter, self).add_centralized_floatingip(
return super().add_centralized_floatingip(
fip, fip_cidr)
else:
return constants.FLOATINGIP_STATUS_ACTIVE
@ -93,7 +93,7 @@ class DvrEdgeHaRouter(dvr_edge_router.DvrEdgeRouter,
def remove_centralized_floatingip(self, fip_cidr):
self._remove_vip(fip_cidr)
if self.is_router_primary():
super(DvrEdgeHaRouter, self).remove_centralized_floatingip(
super().remove_centralized_floatingip(
fip_cidr)
def get_centralized_fip_cidr_set(self):
@ -105,7 +105,7 @@ class DvrEdgeHaRouter(dvr_edge_router.DvrEdgeRouter,
return set(self._get_cidrs_from_keepalived(interface_name))
def external_gateway_added(self, ex_gw_port, interface_name):
super(DvrEdgeHaRouter, self).external_gateway_added(
super().external_gateway_added(
ex_gw_port, interface_name)
for port in self.get_snat_interfaces():
snat_interface_name = self._get_snat_int_device_name(port['id'])
@ -124,7 +124,7 @@ class DvrEdgeHaRouter(dvr_edge_router.DvrEdgeRouter,
namespace=self.ha_namespace,
prefix=constants.SNAT_INT_DEV_PREFIX)
self._clear_vips(snat_interface)
super(DvrEdgeHaRouter, self)._external_gateway_removed(
super()._external_gateway_removed(
ex_gw_port, interface_name)
self._clear_vips(interface_name)
@ -140,7 +140,7 @@ class DvrEdgeHaRouter(dvr_edge_router.DvrEdgeRouter,
return self.agent_conf.agent_mode == constants.L3_AGENT_MODE_DVR_SNAT
def _dvr_internal_network_removed(self, port):
super(DvrEdgeHaRouter, self)._dvr_internal_network_removed(port)
super()._dvr_internal_network_removed(port)
sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports)
if not sn_port:
return

View File

@ -28,7 +28,7 @@ LOG = logging.getLogger(__name__)
class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
def __init__(self, host, *args, **kwargs):
super(DvrEdgeRouter, self).__init__(host, *args, **kwargs)
super().__init__(host, *args, **kwargs)
self.snat_namespace = dvr_snat_ns.SnatNamespace(
self.router_id, self.agent_conf, self.driver, self.use_ipv6)
self.snat_iptables_manager = None
@ -37,7 +37,7 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
return self.snat_namespace.name
def external_gateway_added(self, ex_gw_port, interface_name):
super(DvrEdgeRouter, self).external_gateway_added(
super().external_gateway_added(
ex_gw_port, interface_name)
if self._is_this_snat_host():
self._create_dvr_gateway(ex_gw_port, interface_name)
@ -89,8 +89,8 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
preserve_ips)
def _external_gateway_removed(self, ex_gw_port, interface_name):
super(DvrEdgeRouter, self).external_gateway_removed(ex_gw_port,
interface_name)
super().external_gateway_removed(ex_gw_port,
interface_name)
if not self._is_this_snat_host() and not self.snat_namespace.exists():
# no centralized SNAT gateway for this node/agent
LOG.debug("not hosting snat for router: %s", self.router['id'])
@ -106,7 +106,7 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
self.snat_namespace.delete()
def internal_network_added(self, port):
super(DvrEdgeRouter, self).internal_network_added(port)
super().internal_network_added(port)
# TODO(gsagie) some of this checks are already implemented
# in the base class, think how to avoid re-doing them
@ -143,12 +143,12 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
prefix=lib_constants.SNAT_INT_DEV_PREFIX)
def internal_network_updated(self, port):
super(DvrEdgeRouter, self).internal_network_updated(port)
super().internal_network_updated(port)
if port:
self._set_snat_interfce_mtu(port)
def _dvr_internal_network_removed(self, port):
super(DvrEdgeRouter, self)._dvr_internal_network_removed(port)
super()._dvr_internal_network_removed(port)
if not self.ex_gw_port:
return
@ -178,7 +178,7 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
def initialize(self, process_monitor):
self._create_snat_namespace()
super(DvrEdgeRouter, self).initialize(process_monitor)
super().initialize(process_monitor)
def _create_dvr_gateway(self, ex_gw_port, gw_interface_name):
# connect snat_ports to br_int from SNAT namespace
@ -220,7 +220,7 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
return host == self.host
def _handle_router_snat_rules(self, ex_gw_port, interface_name):
super(DvrEdgeRouter, self)._handle_router_snat_rules(
super()._handle_router_snat_rules(
ex_gw_port, interface_name)
if not self._is_this_snat_host():
@ -259,21 +259,21 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
if self._should_update_snat_routing_table():
ns_name = self.snat_namespace.name
self._update_routing_table(operation, route, ns_name)
super(DvrEdgeRouter, self).update_routing_table(operation, route)
super().update_routing_table(operation, route)
def update_routing_table_ecmp(self, route_list):
if self._should_update_snat_routing_table():
ns_name = self.snat_namespace.name
self._update_routing_table_ecmp(route_list, ns_name)
super(DvrEdgeRouter, self).update_routing_table_ecmp(route_list)
super().update_routing_table_ecmp(route_list)
def delete(self):
super(DvrEdgeRouter, self).delete()
super().delete()
if self.snat_namespace.exists():
self.snat_namespace.delete()
def process_address_scope(self):
super(DvrEdgeRouter, self).process_address_scope()
super().process_address_scope()
if not self._is_this_snat_host():
return
@ -327,9 +327,9 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
return set()
interface_name = self.get_snat_external_device_interface_name(
ex_gw_port)
return set([addr['cidr'] for addr in ip_lib.get_devices_with_ip(
return {addr['cidr'] for addr in ip_lib.get_devices_with_ip(
self.snat_namespace.name,
name=interface_name)])
name=interface_name)}
def get_router_cidrs(self, device):
"""Over-ride the get_router_cidrs function to return the list.
@ -339,7 +339,7 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
This includes the centralized floatingip cidr list and the
regular floatingip cidr list that are bound to fip namespace.
"""
fip_cidrs = super(DvrEdgeRouter, self).get_router_cidrs(device)
fip_cidrs = super().get_router_cidrs(device)
centralized_cidrs = self.get_centralized_fip_cidr_set()
return fip_cidrs | centralized_cidrs
@ -382,7 +382,8 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
return lib_constants.FLOATINGIP_STATUS_ACTIVE
def _centralized_floating_forward_rules(self, floating_ip, fixed_ip):
to_source = '-s %s/32 -j SNAT --to-source %s' % (fixed_ip, floating_ip)
to_source = '-s {}/32 -j SNAT --to-source {}'.format(
fixed_ip, floating_ip)
if self.snat_iptables_manager.random_fully:
to_source += ' --random-fully'
return [('PREROUTING', '-d %s/32 -j DNAT --to-destination %s' %
@ -417,4 +418,4 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
# Cover mixed dvr_snat and compute node, aka a dvr_snat node has both
# centralized and distributed floating IPs.
super(DvrEdgeRouter, self).process_floating_ip_nat_rules()
super().process_floating_ip_nat_rules()

View File

@ -51,7 +51,7 @@ class FipNamespace(namespaces.Namespace):
def __init__(self, ext_net_id, agent_conf, driver, use_ipv6):
name = self._get_ns_name(ext_net_id)
super(FipNamespace, self).__init__(
super().__init__(
name, agent_conf, driver, use_ipv6)
self._ext_net_id = ext_net_id
@ -208,7 +208,7 @@ class FipNamespace(namespaces.Namespace):
def create(self):
LOG.debug("DVR: add fip namespace: %s", self.name)
# parent class will ensure the namespace exists and turn-on forwarding
super(FipNamespace, self).create()
super().create()
ip_lib.set_ip_nonlocal_bind_for_namespace(self.name, 1,
root_namespace=True)
@ -239,7 +239,7 @@ class FipNamespace(namespaces.Namespace):
# TODO(mrsmith): add LOG warn if fip count != 0
LOG.debug('DVR: destroy fip namespace: %s', self.name)
super(FipNamespace, self).delete()
super().delete()
def _check_for_gateway_ip_change(self, new_agent_gateway_port):

View File

@ -38,7 +38,7 @@ Arp_entry = collections.namedtuple(
class DvrLocalRouter(dvr_router_base.DvrRouterBase):
def __init__(self, host, *args, **kwargs):
super(DvrLocalRouter, self).__init__(host, *args, **kwargs)
super().__init__(host, *args, **kwargs)
self.floating_ips_dict = {}
# Linklocal subnet for router and floating IP namespace link
@ -132,9 +132,10 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
floating_ip = fip['floating_ip_address']
rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id)
dnat_from_floatingip_to_fixedip = (
'PREROUTING', '-d %s/32 -i %s -j DNAT --to-destination %s' % (
'PREROUTING', '-d {}/32 -i {} -j DNAT --to-destination {}'.format(
floating_ip, rtr_2_fip_name, fixed_ip))
to_source = '-s %s/32 -j SNAT --to-source %s' % (fixed_ip, floating_ip)
to_source = '-s {}/32 -j SNAT --to-source {}'.format(
fixed_ip, floating_ip)
if self.iptables_manager.random_fully:
to_source += ' --random-fully'
snat_from_fixedip_to_floatingip = ('float-snat', to_source)
@ -147,7 +148,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id)
mark_traffic_to_floating_ip = (
'floatingip', '-d %s/32 -i %s -j MARK --set-xmark %s' % (
'floatingip', '-d {}/32 -i {} -j MARK --set-xmark {}'.format(
floating_ip, rtr_2_fip_name, internal_mark))
mark_traffic_from_fixed_ip = (
'FORWARD', '-s %s/32 -j $float-snat' % fixed_ip)
@ -505,7 +506,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
self._snat_redirect_modify(gateway, sn_port, sn_int, is_add=False)
def internal_network_added(self, port):
super(DvrLocalRouter, self).internal_network_added(port)
super().internal_network_added(port)
# NOTE: The following function _set_subnet_arp_info
# should be called to dynamically populate the arp
@ -576,7 +577,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
def internal_network_removed(self, port):
self._dvr_internal_network_removed(port)
super(DvrLocalRouter, self).internal_network_removed(port)
super().internal_network_removed(port)
def get_floating_agent_gw_interface(self, ext_net_id):
"""Filter Floating Agent GW port for the external network."""
@ -739,7 +740,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
if ex_gw_port:
self.create_dvr_external_gateway_on_agent(ex_gw_port)
self.connect_rtr_2_fip()
super(DvrLocalRouter, self).process_external()
super().process_external()
def _check_rtr_2_fip_connect(self):
"""Checks if the rtr to fip connect exists, if not sets to false."""
@ -866,7 +867,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
tbl_index = self._get_snat_idx(fip_2_rtr)
self._update_fip_route_table_with_next_hop_routes(
operation, route, fip_ns_name, tbl_index)
super(DvrLocalRouter, self).update_routing_table(operation, route)
super().update_routing_table(operation, route)
def _update_fip_route_table_with_next_hop_routes(self, operation, route,
fip_ns_name, tbl_index):
@ -923,4 +924,4 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
self.fip_ns = self.agent.get_fip_ns(ex_gw_port['network_id'])
self.fip_ns.scan_fip_ports(self)
super(DvrLocalRouter, self).process()
super().process()

View File

@ -20,13 +20,13 @@ LOG = logging.getLogger(__name__)
class DvrRouterBase(router.RouterInfo):
def __init__(self, host, *args, **kwargs):
super(DvrRouterBase, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self.host = host
self.snat_ports = None
def process(self):
super(DvrRouterBase, self).process()
super().process()
# NOTE: Keep a copy of the interfaces around for when they are removed
self.snat_ports = self.get_snat_interfaces()

View File

@ -25,11 +25,11 @@ class SnatNamespace(namespaces.Namespace):
def __init__(self, router_id, agent_conf, driver, use_ipv6):
self.router_id = router_id
name = self.get_snat_ns_name(router_id)
super(SnatNamespace, self).__init__(
super().__init__(
name, agent_conf, driver, use_ipv6)
def create(self):
super(SnatNamespace, self).create()
super().create()
# Set nonlocal_bind to 1 to allow setup applications in HA mode
# for example ipsec from VPNaaS
ip_lib.set_ip_nonlocal_bind_for_namespace(self.name, 1)
@ -58,4 +58,4 @@ class SnatNamespace(namespaces.Namespace):
# TODO(mrsmith): delete ext-gw-port
LOG.debug('DVR: destroy snat ns: %s', self.name)
super(SnatNamespace, self).delete()
super().delete()

View File

@ -33,7 +33,7 @@ CONNTRACK_HELPER_PREFIX = 'cthelper-'
CONNTRACK_HELPER_CHAIN_PREFIX = DEFAULT_CONNTRACK_HELPER_CHAIN + '-'
class ConntrackHelperMapping(object):
class ConntrackHelperMapping:
def __init__(self):
self._managed_conntrack_helpers = {}
@ -131,8 +131,8 @@ class ConntrackHelperAgentExtension(l3_extension.L3AgentExtension):
:constants.MAX_IPTABLES_CHAIN_LEN_WRAP]
def _install_default_rules(self, iptables_manager, version):
default_rule = '-j %s-%s' % (iptables_manager.wrap_name,
DEFAULT_CONNTRACK_HELPER_CHAIN)
default_rule = '-j {}-{}'.format(iptables_manager.wrap_name,
DEFAULT_CONNTRACK_HELPER_CHAIN)
if version == constants.IPv4:
iptables_manager.ipv4['raw'].add_chain(
DEFAULT_CONNTRACK_HELPER_CHAIN)
@ -146,7 +146,7 @@ class ConntrackHelperAgentExtension(l3_extension.L3AgentExtension):
def _get_chain_rules_list(self, conntrack_helper, wrap_name):
chain_name = self._get_chain_name(conntrack_helper.id)
chain_rule_list = [(DEFAULT_CONNTRACK_HELPER_CHAIN,
'-j %s-%s' % (wrap_name, chain_name))]
'-j {}-{}'.format(wrap_name, chain_name))]
chain_rule_list.append((chain_name,
'-p %(proto)s --dport %(dport)s -j CT '
'--helper %(helper)s' %

View File

@ -34,7 +34,7 @@ LOG = logging.getLogger(__name__)
DEFAULT_NDP_PROXY_CHAIN = 'NDP'
class RouterNDPProxyMapping(object):
class RouterNDPProxyMapping:
def __init__(self):
self.managed_ndp_proxies = {}
@ -217,7 +217,7 @@ class NDPProxyAgentExtension(l3_extension.L3AgentExtension):
cmd = ['ip', '-6', 'neigh', 'add',
'proxy', v6_address, 'dev', interface_name]
ip_wrapper.netns.execute(cmd, privsep_exec=True)
accept_rule = '-i %s --destination %s -j ACCEPT' % (
accept_rule = '-i {} --destination {} -j ACCEPT'.format(
interface_name, v6_address)
iptables_manager.ipv6['filter'].add_rule(
DEFAULT_NDP_PROXY_CHAIN, accept_rule, top=True)
@ -251,7 +251,7 @@ class NDPProxyAgentExtension(l3_extension.L3AgentExtension):
cmd = ['ip', '-6', 'neigh', 'del',
'proxy', v6_address, 'dev', interface_name]
ip_wrapper.netns.execute(cmd, privsep_exec=True)
accept_rule = '-i %s --destination %s -j ACCEPT' % (
accept_rule = '-i {} --destination {} -j ACCEPT'.format(
interface_name, v6_address)
iptables_manager.ipv6['filter'].remove_rule(
DEFAULT_NDP_PROXY_CHAIN, accept_rule, top=True)

View File

@ -36,7 +36,7 @@ PORT_FORWARDING_PREFIX = 'fip_portforwarding-'
PORT_FORWARDING_CHAIN_PREFIX = 'pf-'
class RouterFipPortForwardingMapping(object):
class RouterFipPortForwardingMapping:
def __init__(self):
self.managed_port_forwardings = {}
"""
@ -386,9 +386,9 @@ class PortForwardingAgentExtension(l3_extension.L3AgentExtension):
iptables_manager.apply()
fip_id_cidrs = set([(pf.floatingip_id,
str(netaddr.IPNetwork(pf.floating_ip_address)))
for pf in port_forwardings])
fip_id_cidrs = {(pf.floatingip_id,
str(netaddr.IPNetwork(pf.floating_ip_address)))
for pf in port_forwardings}
self._sync_and_remove_fip(context, fip_id_cidrs, device, ri)
self._store_local(port_forwardings, events.DELETED)
@ -431,8 +431,8 @@ class PortForwardingAgentExtension(l3_extension.L3AgentExtension):
return chain_name[:constants.MAX_IPTABLES_CHAIN_LEN_WRAP]
def _install_default_rules(self, iptables_manager):
default_rule = '-j %s-%s' % (iptables_manager.wrap_name,
DEFAULT_PORT_FORWARDING_CHAIN)
default_rule = '-j {}-{}'.format(iptables_manager.wrap_name,
DEFAULT_PORT_FORWARDING_CHAIN)
iptables_manager.ipv4['nat'].add_chain(DEFAULT_PORT_FORWARDING_CHAIN)
iptables_manager.ipv4['nat'].add_rule('PREROUTING', default_rule)
iptables_manager.apply()

View File

@ -50,7 +50,7 @@ IP_DEFAULT_RATE = 0
IP_DEFAULT_BURST = 0
class RateLimitMaps(object):
class RateLimitMaps:
def __init__(self, lock_name):
# qos_policy_2_resources = {qos_id_1: {res_1, res_2, res_3, ...} }
@ -138,7 +138,7 @@ class RateLimitMaps(object):
del self.known_policies[qos_policy_id]
class L3QosAgentExtensionBase(object):
class L3QosAgentExtensionBase:
SUPPORTED_RESOURCE_TYPES = [resources.QOS_POLICY]
def consume_api(self, agent_api):

View File

@ -60,7 +60,7 @@ class RouterFipRateLimitMaps(qos_base.RateLimitMaps):
"""
self.ingress_ratelimits = {}
self.egress_ratelimits = {}
super(RouterFipRateLimitMaps, self).__init__(self.LOCK_NAME)
super().__init__(self.LOCK_NAME)
def get_router_id_by_fip(self, fip_res):
@ -74,7 +74,7 @@ class RouterFipRateLimitMaps(qos_base.RateLimitMaps):
@lockutils.synchronized(self.lock_name)
def _get_fips_by_router_id():
return self._router_2_fips.get(router_id, set([]))
return self._router_2_fips.get(router_id, set())
return _get_fips_by_router_id()
@ -94,7 +94,7 @@ class RouterFipRateLimitMaps(qos_base.RateLimitMaps):
@lockutils.synchronized(self.lock_name)
def _delete_fips():
router_ids = set([])
router_ids = set()
for fip_res in fips:
router_id = self._fips_2_router.pop(fip_res, None)
if router_id:
@ -337,7 +337,7 @@ class FipQosAgentExtension(qos_base.L3QosAgentExtensionBase,
router_info.get_port_forwarding_fips())
current_fips = self.fip_qos_map.get_fips_by_router_id(
router_info.router_id)
new_fips = set([])
new_fips = set()
for fip in floating_ips:
fip_res = FipResource(fip['id'], fip['floating_ip_address'])
new_fips.add(fip_res)

View File

@ -15,7 +15,7 @@
from neutron.agent.l3.item_allocator import ItemAllocator
class FipPriority(object):
class FipPriority:
def __init__(self, index):
self.index = index
@ -48,9 +48,9 @@ class FipRulePriorityAllocator(ItemAllocator):
using ',' as the delimiter and FipRulePriorityAllocator as the
class type
"""
pool = set(FipPriority(str(s)) for s in range(priority_rule_start,
priority_rule_end))
pool = {FipPriority(str(s)) for s in range(priority_rule_start,
priority_rule_end)}
super(FipRulePriorityAllocator, self).__init__(data_store_path,
FipPriority,
pool)
super().__init__(data_store_path,
FipPriority,
pool)

View File

@ -39,7 +39,7 @@ TRANSLATION_MAP = {'primary': constants.HA_ROUTER_STATE_ACTIVE,
'unknown': constants.HA_ROUTER_STATE_UNKNOWN}
class KeepalivedStateChangeHandler(object):
class KeepalivedStateChangeHandler:
def __init__(self, agent):
self.agent = agent
@ -56,7 +56,7 @@ class KeepalivedStateChangeHandler(object):
self.agent.enqueue_state_change(router_id, state)
class L3AgentKeepalivedStateChangeServer(object):
class L3AgentKeepalivedStateChangeServer:
def __init__(self, agent, conf):
self.agent = agent
self.conf = conf
@ -80,10 +80,10 @@ class L3AgentKeepalivedStateChangeServer(object):
@registry.has_registry_receivers
class AgentMixin(object):
class AgentMixin:
def __init__(self, host):
self._init_ha_conf_path()
super(AgentMixin, self).__init__(host)
super().__init__(host)
# BatchNotifier queue is needed to ensure that the HA router
# state change sequence is under the proper order.
self.state_change_notifier = batch_notifier.BatchNotifier(
@ -254,8 +254,8 @@ class AgentMixin(object):
ri.disable_radvd()
def notify_server(self, batched_events):
translated_states = dict((router_id, TRANSLATION_MAP[state]) for
router_id, state in batched_events)
translated_states = {router_id: TRANSLATION_MAP[state] for
router_id, state in batched_events}
LOG.debug('Updating server with HA routers states %s',
translated_states)
self.plugin_rpc.update_ha_routers_states(

View File

@ -64,7 +64,7 @@ class HaRouterNamespace(namespaces.RouterNamespace):
which cause lost connectivity to Floating IPs.
"""
def create(self):
super(HaRouterNamespace, self).create(ipv6_forwarding=False)
super().create(ipv6_forwarding=False)
# HA router namespaces should have ip_nonlocal_bind enabled
ip_lib.set_ip_nonlocal_bind_for_namespace(self.name, 1)
# Linux should not automatically assign link-local addr for HA routers
@ -76,7 +76,7 @@ class HaRouterNamespace(namespaces.RouterNamespace):
class HaRouter(router.RouterInfo):
def __init__(self, *args, **kwargs):
super(HaRouter, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self.ha_port = None
self.keepalived_manager = None
@ -119,14 +119,14 @@ class HaRouter(router.RouterInfo):
if self._ha_state:
return self._ha_state
try:
with open(self.ha_state_path, 'r') as f:
with open(self.ha_state_path) as f:
# TODO(haleyb): put old code back after a couple releases,
# Y perhaps, just for backwards-compat
# self._ha_state = f.read()
ha_state = f.read()
ha_state = 'primary' if ha_state == 'master' else ha_state
self._ha_state = ha_state
except (OSError, IOError) as error:
except OSError as error:
LOG.debug('Error while reading HA state for %s: %s',
self.router_id, error)
return self._ha_state or 'unknown'
@ -137,7 +137,7 @@ class HaRouter(router.RouterInfo):
try:
with open(self.ha_state_path, 'w') as f:
f.write(new_state)
except (OSError, IOError) as error:
except OSError as error:
LOG.error('Error while writing HA state for %s: %s',
self.router_id, error)
@ -161,7 +161,7 @@ class HaRouter(router.RouterInfo):
self.router_id)
LOG.exception(msg)
raise Exception(msg)
super(HaRouter, self).initialize(process_monitor)
super().initialize(process_monitor)
self.set_ha_port()
self._init_keepalived_manager(process_monitor)
@ -288,7 +288,7 @@ class HaRouter(router.RouterInfo):
route['destination'], route['nexthop'])
for route in new_routes]
if self.router.get('distributed', False):
super(HaRouter, self).routes_updated(old_routes, new_routes)
super().routes_updated(old_routes, new_routes)
self.keepalived_manager.get_process().reload_cfg()
def _add_default_gw_virtual_route(self, ex_gw_port, interface_name):
@ -315,7 +315,7 @@ class HaRouter(router.RouterInfo):
def _add_extra_subnet_onlink_routes(self, ex_gw_port, interface_name):
extra_subnets = ex_gw_port.get('extra_subnets', [])
instance = self._get_keepalived_instance()
onlink_route_cidrs = set(s['cidr'] for s in extra_subnets)
onlink_route_cidrs = {s['cidr'] for s in extra_subnets}
instance.virtual_routes.extra_subnets = [
keepalived.KeepalivedVirtualRoute(
onlink_route_cidr, None, interface_name, scope='link') for
@ -375,7 +375,7 @@ class HaRouter(router.RouterInfo):
self._remove_vip(ip_cidr)
to = common_utils.cidr_to_ip(ip_cidr)
if device.addr.list(to=to):
super(HaRouter, self).remove_floating_ip(device, ip_cidr)
super().remove_floating_ip(device, ip_cidr)
def internal_network_updated(self, port):
interface_name = self.get_internal_device_name(port['id'])
@ -407,7 +407,7 @@ class HaRouter(router.RouterInfo):
port, self.get_internal_device_name, router.INTERNAL_DEV_PREFIX)
def internal_network_removed(self, port):
super(HaRouter, self).internal_network_removed(port)
super().internal_network_removed(port)
interface_name = self.get_internal_device_name(port['id'])
self._clear_vips(interface_name)
@ -483,8 +483,8 @@ class HaRouter(router.RouterInfo):
def _get_filtered_dict(d, ignore):
return {k: v for k, v in d.items() if k not in ignore}
keys_to_ignore = set([portbindings.HOST_ID, timestamp.UPDATED,
revisions.REVISION])
keys_to_ignore = {portbindings.HOST_ID, timestamp.UPDATED,
revisions.REVISION}
port1_filtered = _get_filtered_dict(port1, keys_to_ignore)
port2_filtered = _get_filtered_dict(port2, keys_to_ignore)
return port1_filtered == port2_filtered
@ -513,8 +513,8 @@ class HaRouter(router.RouterInfo):
self._clear_vips(interface_name)
if self.ha_state == 'primary':
super(HaRouter, self).external_gateway_removed(ex_gw_port,
interface_name)
super().external_gateway_removed(ex_gw_port,
interface_name)
else:
# We are not the primary node, so no need to delete ip addresses.
self.driver.unplug(interface_name,
@ -526,7 +526,7 @@ class HaRouter(router.RouterInfo):
self.destroy_state_change_monitor(self.process_monitor)
self.disable_keepalived()
self.ha_network_removed()
super(HaRouter, self).delete()
super().delete()
def set_ha_port(self):
ha_port = self.router.get(n_consts.HA_INTERFACE_KEY)
@ -541,7 +541,7 @@ class HaRouter(router.RouterInfo):
self.ha_port = ha_port
def process(self):
super(HaRouter, self).process()
super().process()
self.set_ha_port()
LOG.debug("Processing HA router %(router_id)s with HA port: %(port)s",
@ -555,4 +555,4 @@ class HaRouter(router.RouterInfo):
def enable_radvd(self, internal_ports=None):
if (self.keepalived_manager.get_process().active and
self.ha_state == 'primary'):
super(HaRouter, self).enable_radvd(internal_ports)
super().enable_radvd(internal_ports)

View File

@ -21,7 +21,7 @@ from neutron._i18n import _
LOG = logging.getLogger(__name__)
class ItemAllocator(object):
class ItemAllocator:
"""Manages allocation of items from a pool
Some of the allocations such as link local addresses used for routing
@ -120,8 +120,9 @@ class ItemAllocator(object):
self._write_allocations()
def _write_allocations(self):
current = ["%s,%s\n" % (k, v) for k, v in self.allocations.items()]
remembered = ["%s,%s\n" % (k, v) for k, v in self.remembered.items()]
current = ["{},{}\n".format(k, v) for k, v in self.allocations.items()]
remembered = ["{},{}\n".format(k, v)
for k, v in self.remembered.items()]
current.extend(remembered)
self._write(current)

View File

@ -60,8 +60,8 @@ class MonitorDaemon(daemon.Daemon):
self.event_started = threading.Event()
self.queue = queue.Queue()
self._initial_state = None
super(MonitorDaemon, self).__init__(pidfile, uuid=router_id,
user=user, group=group)
super().__init__(pidfile, uuid=router_id,
user=user, group=group)
@property
def initial_state(self):
@ -163,7 +163,7 @@ class MonitorDaemon(daemon.Daemon):
def handle_sigterm(self, signum, frame):
self.event_stop.set()
self._thread_read_queue.join(timeout=5)
super(MonitorDaemon, self).handle_sigterm(signum, frame)
super().handle_sigterm(signum, frame)
def configure(conf):

View File

@ -16,7 +16,7 @@
from neutron.agent.linux import ip_lib
class L3AgentExtensionAPI(object):
class L3AgentExtensionAPI:
'''Implements the Agent API for the L3 agent.
Extensions can gain access to this API by overriding the consume_api

View File

@ -34,8 +34,7 @@ class L3AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager):
"""Manage l3 agent extensions."""
def __init__(self, conf):
super(L3AgentExtensionsManager,
self).__init__(conf, L3_AGENT_EXT_MANAGER_NAMESPACE)
super().__init__(conf, L3_AGENT_EXT_MANAGER_NAMESPACE)
extensions = []
for extension in self:
if not isinstance(extension.obj, (l3_extension.L3AgentExtension,)):

View File

@ -19,14 +19,15 @@ from neutron.agent.l3.item_allocator import ItemAllocator
class LinkLocalAddressPair(netaddr.IPNetwork):
def __init__(self, addr):
super(LinkLocalAddressPair, self).__init__(addr)
super().__init__(addr)
def get_pair(self):
"""Builds an address pair from the first and last addresses. """
# TODO(kevinbenton): the callers of this seem only interested in an IP,
# so we should just return two IPAddresses.
return (netaddr.IPNetwork("%s/%s" % (self.network, self.prefixlen)),
netaddr.IPNetwork("%s/%s" % (self[-1], self.prefixlen)))
return (
netaddr.IPNetwork("{}/{}".format(self.network, self.prefixlen)),
netaddr.IPNetwork("{}/{}".format(self[-1], self.prefixlen)))
class LinkLocalAllocator(ItemAllocator):
@ -46,7 +47,7 @@ class LinkLocalAllocator(ItemAllocator):
class type
"""
subnet = netaddr.IPNetwork(subnet)
pool = set(LinkLocalAddressPair(s) for s in subnet.subnet(31))
super(LinkLocalAllocator, self).__init__(data_store_path,
LinkLocalAddressPair,
pool)
pool = {LinkLocalAddressPair(s) for s in subnet.subnet(31)}
super().__init__(data_store_path,
LinkLocalAddressPair,
pool)

View File

@ -21,7 +21,7 @@ from neutron.agent.linux import ip_lib
LOG = logging.getLogger(__name__)
class NamespaceManager(object):
class NamespaceManager:
"""Keeps track of namespaces that need to be cleaned up.
@ -115,7 +115,7 @@ class NamespaceManager(object):
"""Get a set of all namespaces on host managed by this manager."""
try:
namespaces = ip_lib.list_network_namespaces()
return set(ns for ns in namespaces if self.is_managed(ns))
return {ns for ns in namespaces if self.is_managed(ns)}
except RuntimeError:
LOG.exception('RuntimeError in obtaining namespace list for '
'namespace cleanup.')

View File

@ -79,7 +79,7 @@ def check_ns_existence(f):
return wrapped
class Namespace(object):
class Namespace:
def __init__(self, name, agent_conf, driver, use_ipv6):
self.name = name
@ -126,7 +126,7 @@ class RouterNamespace(Namespace):
def __init__(self, router_id, agent_conf, driver, use_ipv6):
self.router_id = router_id
name = self._get_ns_name(router_id)
super(RouterNamespace, self).__init__(
super().__init__(
name, agent_conf, driver, use_ipv6)
@classmethod
@ -149,4 +149,4 @@ class RouterNamespace(Namespace):
namespace=self.name,
prefix=EXTERNAL_DEV_PREFIX)
super(RouterNamespace, self).delete()
super().delete()

View File

@ -44,7 +44,7 @@ ADDRESS_SCOPE_MARK_ID_MAX = 2048
DEFAULT_ADDRESS_SCOPE = "noscope"
class BaseRouterInfo(object, metaclass=abc.ABCMeta):
class BaseRouterInfo(metaclass=abc.ABCMeta):
def __init__(self,
agent,
@ -130,8 +130,8 @@ class RouterInfo(BaseRouterInfo):
agent_conf,
interface_driver,
use_ipv6=False):
super(RouterInfo, self).__init__(agent, router_id, router, agent_conf,
interface_driver, use_ipv6)
super().__init__(agent, router_id, router, agent_conf,
interface_driver, use_ipv6)
self.ex_gw_port = None
self.fip_map = {}
@ -158,7 +158,7 @@ class RouterInfo(BaseRouterInfo):
self.qos_gateway_ips = set()
def initialize(self, process_monitor):
super(RouterInfo, self).initialize(process_monitor)
super().initialize(process_monitor)
self.radvd = ra.DaemonMonitor(self.router_id,
self.ns_name,
process_monitor,
@ -266,7 +266,8 @@ class RouterInfo(BaseRouterInfo):
def floating_forward_rules(self, fip):
fixed_ip = fip['fixed_ip_address']
floating_ip = fip['floating_ip_address']
to_source = '-s %s/32 -j SNAT --to-source %s' % (fixed_ip, floating_ip)
to_source = '-s {}/32 -j SNAT --to-source {}'.format(
fixed_ip, floating_ip)
if self.iptables_manager.random_fully:
to_source += ' --random-fully'
return [('PREROUTING', '-d %s/32 -j DNAT --to-destination %s' %
@ -277,7 +278,7 @@ class RouterInfo(BaseRouterInfo):
def floating_mangle_rules(self, floating_ip, fixed_ip, internal_mark):
mark_traffic_to_floating_ip = (
'floatingip', '-d %s/32 -j MARK --set-xmark %s' % (
'floatingip', '-d {}/32 -j MARK --set-xmark {}'.format(
floating_ip, internal_mark))
mark_traffic_from_fixed_ip = (
'FORWARD', '-s %s/32 -j $float-snat' % fixed_ip)
@ -293,7 +294,7 @@ class RouterInfo(BaseRouterInfo):
mark_id = self._address_scope_to_mark_id[address_scope]
# NOTE: Address scopes use only the upper 16 bits of the 32 fwmark
return "%s/%s" % (hex(mark_id << 16), ADDRESS_SCOPE_MARK_MASK)
return "{}/{}".format(hex(mark_id << 16), ADDRESS_SCOPE_MARK_MASK)
def get_port_address_scope_mark(self, port):
"""Get the IP version 4 and 6 address scope mark for the port
@ -427,7 +428,7 @@ class RouterInfo(BaseRouterInfo):
device.delete_addr_and_conntrack_state(ip_cidr)
def get_router_cidrs(self, device):
return set([addr['cidr'] for addr in device.addr.list()])
return {addr['cidr'] for addr in device.addr.list()}
def get_centralized_fip_cidr_set(self):
return set()
@ -655,18 +656,18 @@ class RouterInfo(BaseRouterInfo):
namespace=self.ns_name)
def address_scope_mangle_rule(self, device_name, mark_mask):
return '-i %s -j MARK --set-xmark %s' % (device_name, mark_mask)
return '-i {} -j MARK --set-xmark {}'.format(device_name, mark_mask)
def address_scope_filter_rule(self, device_name, mark_mask):
return '-o %s -m mark ! --mark %s -j DROP' % (
return '-o {} -m mark ! --mark {} -j DROP'.format(
device_name, mark_mask)
def _process_internal_ports(self):
existing_port_ids = set(p['id'] for p in self.internal_ports)
existing_port_ids = {p['id'] for p in self.internal_ports}
internal_ports = self.router.get(lib_constants.INTERFACE_KEY, [])
current_port_ids = set(p['id'] for p in internal_ports
if p['admin_state_up'])
current_port_ids = {p['id'] for p in internal_ports
if p['admin_state_up']}
new_port_ids = current_port_ids - existing_port_ids
new_ports = [p for p in internal_ports if p['id'] in new_port_ids]
@ -731,10 +732,10 @@ class RouterInfo(BaseRouterInfo):
self.enable_radvd(internal_ports)
existing_devices = self._get_existing_devices()
current_internal_devs = set(n for n in existing_devices
if n.startswith(INTERNAL_DEV_PREFIX))
current_port_devs = set(self.get_internal_device_name(port_id)
for port_id in current_port_ids)
current_internal_devs = {n for n in existing_devices
if n.startswith(INTERNAL_DEV_PREFIX)}
current_port_devs = {self.get_internal_device_name(port_id)
for port_id in current_port_ids}
stale_devs = current_internal_devs - current_port_devs
for stale_dev in stale_devs:
LOG.debug('Deleting stale internal router device: %s',
@ -959,7 +960,8 @@ class RouterInfo(BaseRouterInfo):
snat_internal_traffic_to_floating_ip]
def external_gateway_nat_snat_rules(self, ex_gw_ip, interface_name):
to_source = '-o %s -j SNAT --to-source %s' % (interface_name, ex_gw_ip)
to_source = '-o {} -j SNAT --to-source {}'.format(
interface_name, ex_gw_ip)
if self.iptables_manager.random_fully:
to_source += ' --random-fully'
return [('snat', to_source)]
@ -1135,7 +1137,7 @@ class RouterInfo(BaseRouterInfo):
'value': self.agent_conf.metadata_access_mark,
'mask': lib_constants.ROUTER_MARK_MASK})
drop_non_local_metadata = (
'-m mark --mark %s/%s -j DROP' % (
'-m mark --mark {}/{} -j DROP'.format(
self.agent_conf.metadata_access_mark,
lib_constants.ROUTER_MARK_MASK))
self.iptables_manager.ipv4['mangle'].add_rule(
@ -1154,7 +1156,7 @@ class RouterInfo(BaseRouterInfo):
'value': self.agent_conf.metadata_access_mark,
'mask': lib_constants.ROUTER_MARK_MASK})
drop_non_local_v6_metadata = (
'-m mark --mark %s/%s -j DROP' % (
'-m mark --mark {}/{} -j DROP'.format(
self.agent_conf.metadata_access_mark,
lib_constants.ROUTER_MARK_MASK))
self.iptables_manager.ipv6['mangle'].add_rule(
@ -1265,7 +1267,7 @@ class RouterInfo(BaseRouterInfo):
return
# Prevents snat within the same address scope
rule = '-o %s -m connmark --mark %s -j ACCEPT' % (
rule = '-o {} -m connmark --mark {} -j ACCEPT'.format(
external_devicename,
self.get_address_scope_mark_mask(address_scope))
iptables_manager.ipv4['nat'].add_rule('snat', rule)
@ -1311,8 +1313,8 @@ class RouterInfo(BaseRouterInfo):
# Update ex_gw_port on the router info cache
self.ex_gw_port = self.get_ex_gw_port()
self.fip_map = dict((fip['floating_ip_address'],
fip['fixed_ip_address'])
for fip in self.get_floating_ips())
self.fip_map = {fip['floating_ip_address']:
fip['fixed_ip_address']
for fip in self.get_floating_ips()}
self.fip_managed_by_port_forwardings = self.router.get(
'fip_managed_by_port_forwardings')

View File

@ -60,9 +60,9 @@ def is_bridged_interface(interface):
def get_interface_ifindex(interface):
try:
with open(os.path.join(BRIDGE_FS, interface, 'ifindex'), 'r') as fh:
with open(os.path.join(BRIDGE_FS, interface, 'ifindex')) as fh:
return int(fh.read().strip())
except (IOError, ValueError):
except (OSError, ValueError):
pass
@ -129,7 +129,7 @@ class BridgeDevice(ip_lib.IPDevice):
return []
class FdbInterface(object):
class FdbInterface:
"""Provide basic functionality to edit the FDB table"""
@staticmethod

View File

@ -117,7 +117,7 @@ def drop_privileges(user=None, group=None):
{'uid': os.getuid(), 'gid': os.getgid()})
class Pidfile(object):
class Pidfile:
def __init__(self, pidfile, procname, uuid=None):
self.pidfile = pidfile
self.procname = procname
@ -125,7 +125,7 @@ class Pidfile(object):
try:
self.fd = os.open(pidfile, os.O_CREAT | os.O_RDWR)
fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
except OSError:
LOG.exception("Error while handling pidfile: %s", pidfile)
sys.exit(1)
@ -155,15 +155,15 @@ class Pidfile(object):
cmdline = '/proc/%s/cmdline' % pid
try:
with open(cmdline, "r") as f:
with open(cmdline) as f:
exec_out = f.readline()
return self.procname in exec_out and (not self.uuid or
self.uuid in exec_out)
except IOError:
except OSError:
return False
class Daemon(object):
class Daemon:
"""A generic daemon class.
Usage: subclass the Daemon class and override the run() method
@ -250,7 +250,7 @@ class Daemon(object):
self.run()
def _set_process_title(self):
proctitle = "%s (%s)" % (self.procname, self._parent_proctitle)
proctitle = "{} ({})".format(self.procname, self._parent_proctitle)
setproctitle.setproctitle(proctitle)
def run(self):

View File

@ -105,7 +105,7 @@ class DictModel(collections.abc.MutableMapping):
if isinstance(value, (list, tuple)):
# Keep the same type but convert dicts to DictModels
self._dictmodel_internal_storage[key] = type(value)(
(upgrade(item) for item in value)
upgrade(item) for item in value
)
elif needs_upgrade(value):
# Change dict instance values to DictModel instance values
@ -116,14 +116,14 @@ class DictModel(collections.abc.MutableMapping):
def __getattr__(self, name):
try:
if name == '_dictmodel_internal_storage':
return super(DictModel, self).__getattr__(name)
return super().__getattr__(name)
return self.__getitem__(name)
except KeyError as e:
raise AttributeError(e)
def __setattr__(self, name, value):
if name == '_dictmodel_internal_storage':
super(DictModel, self).__setattr__(name, value)
super().__setattr__(name, value)
else:
self._dictmodel_internal_storage[name] = value
@ -131,7 +131,7 @@ class DictModel(collections.abc.MutableMapping):
del self._dictmodel_internal_storage[name]
def __str__(self):
pairs = ['%s=%s' % (k, v) for k, v in
pairs = ['{}={}'.format(k, v) for k, v in
self._dictmodel_internal_storage.items()]
return ', '.join(sorted(pairs))
@ -169,9 +169,9 @@ class DictModel(collections.abc.MutableMapping):
class NetModel(DictModel):
def __init__(self, *args, **kwargs):
super(NetModel, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self._ns_name = "%s%s" % (NS_PREFIX, self.id)
self._ns_name = "{}{}".format(NS_PREFIX, self.id)
@property
def namespace(self):
@ -186,7 +186,7 @@ class NetModel(DictModel):
return self['tenant_id']
class DhcpBase(object, metaclass=abc.ABCMeta):
class DhcpBase(metaclass=abc.ABCMeta):
def __init__(self, conf, network, process_monitor,
version=None, plugin=None, segment=None):
@ -255,8 +255,8 @@ class DhcpLocalProcess(DhcpBase, metaclass=abc.ABCMeta):
def __init__(self, conf, network, process_monitor, version=None,
plugin=None, segment=None):
super(DhcpLocalProcess, self).__init__(conf, network, process_monitor,
version, plugin, segment)
super().__init__(conf, network, process_monitor,
version, plugin, segment)
self.confs_dir = self.get_confs_dir(conf)
if self.segment:
# In case of multi-segments support we want a dns process per vlan.
@ -304,7 +304,8 @@ class DhcpLocalProcess(DhcpBase, metaclass=abc.ABCMeta):
# NOTE(sahid): Keep the order to match directory path. This is used
# by external_process.ProcessManager to check whether the process
# is active.
return "%s/%s" % (self.segment.segmentation_id, self.network.id)
return "{}/{}".format(
self.segment.segmentation_id, self.network.id)
return self.network.id
def _remove_config_files(self):
@ -399,11 +400,11 @@ class DhcpLocalProcess(DhcpBase, metaclass=abc.ABCMeta):
"""A helper function to read a value from one of the state files."""
file_name = self.get_conf_file_name(kind)
try:
with open(file_name, 'r') as f:
with open(file_name) as f:
return converter(f.read()) if converter else f.read()
except ValueError:
LOG.debug("Unable to convert value in %s", file_name)
except IOError:
except OSError:
LOG.debug("Unable to access %s", file_name)
return None
@ -791,7 +792,7 @@ class Dnsmasq(DhcpLocalProcess):
ip_addresses[0].replace('.', '-').replace(':', '-'))
fqdn = hostname
if self.conf.dns_domain:
fqdn = '%s.%s' % (fqdn, self.conf.dns_domain)
fqdn = '{}.{}'.format(fqdn, self.conf.dns_domain)
return hostname, fqdn
@ -812,9 +813,9 @@ class Dnsmasq(DhcpLocalProcess):
tag, # A dhcp-host tag to add to the configuration if supported
)
"""
v6_nets = dict((subnet.id, subnet) for subnet in
self._get_all_subnets(self.network)
if subnet.ip_version == 6)
v6_nets = {subnet.id: subnet for subnet in
self._get_all_subnets(self.network)
if subnet.ip_version == 6}
for port in self.network.ports:
if not port_requires_dhcp_configuration(port):
@ -967,7 +968,7 @@ class Dnsmasq(DhcpLocalProcess):
port, alloc, hostname, name, no_dhcp, no_opts, tag = host_tuple
if no_dhcp:
if not no_opts and self._get_port_extra_dhcp_opts(port):
buf.write('%s,%s%s%s\n' % (
buf.write('{},{}{}{}\n'.format(
port.mac_address, tag,
'set:', self._PORT_TAG_PREFIX % port.id))
continue
@ -1033,7 +1034,7 @@ class Dnsmasq(DhcpLocalProcess):
ips = self._parse_ip_addresses(host[2:])
for ip in ips:
leases.add((ip, mac, client_id))
except (OSError, IOError):
except OSError:
LOG.debug('Error while reading hosts file %s', filename)
return leases
@ -1198,7 +1199,8 @@ class Dnsmasq(DhcpLocalProcess):
# It is compulsory to write the `fqdn` before the `hostname` in
# order to obtain it in PTR responses.
if alloc:
buf.write('%s\t%s %s\n' % (alloc.ip_address, fqdn, hostname))
buf.write('{}\t{} {}\n'.format(
alloc.ip_address, fqdn, hostname))
addn_hosts = self.get_conf_file_name('addn_hosts')
file_utils.replace_file(addn_hosts, buf.getvalue())
return addn_hosts
@ -1277,7 +1279,8 @@ class Dnsmasq(DhcpLocalProcess):
if not gateway:
gateway = hr.nexthop
else:
host_routes.append("%s,%s" % (hr.destination, hr.nexthop))
host_routes.append("{},{}".format(
hr.destination, hr.nexthop))
# Determine metadata port route
if subnet.ip_version == constants.IP_VERSION_4:
@ -1293,7 +1296,7 @@ class Dnsmasq(DhcpLocalProcess):
elif (self.conf.force_metadata or
(isolated_subnets[subnet.id] and
self.conf.enable_isolated_metadata)):
self.conf.enable_isolated_metadata)):
subnet_dhcp_ip = subnet_to_interface_ip.get(subnet.id)
if subnet_dhcp_ip:
metadata_route_ip = subnet_dhcp_ip
@ -1303,7 +1306,8 @@ class Dnsmasq(DhcpLocalProcess):
if metadata_route_ip:
host_routes.append(
'%s,%s' % (constants.METADATA_CIDR, metadata_route_ip)
'{},{}'.format(constants.METADATA_CIDR,
metadata_route_ip)
)
for s in self._get_all_subnets(self.network):
@ -1315,8 +1319,8 @@ class Dnsmasq(DhcpLocalProcess):
if host_routes:
if gateway:
host_routes.append("%s,%s" % (constants.IPv4_ANY,
gateway))
host_routes.append("{},{}".format(constants.IPv4_ANY,
gateway))
options.append(
self._format_option(
subnet.ip_version,
@ -1345,9 +1349,9 @@ class Dnsmasq(DhcpLocalProcess):
dhcp_ips = collections.defaultdict(list)
for port in self.network.ports:
if self._get_port_extra_dhcp_opts(port):
port_ip_versions = set(
[netaddr.IPAddress(ip.ip_address).version
for ip in port.fixed_ips])
port_ip_versions = {
netaddr.IPAddress(ip.ip_address).version
for ip in port.fixed_ips}
for opt in port.extra_dhcp_opts:
if opt.opt_name in (edo_ext.DHCP_OPT_CLIENT_ID,
DHCP_OPT_CLIENT_ID_NUM,
@ -1391,10 +1395,10 @@ class Dnsmasq(DhcpLocalProcess):
return options
def _make_subnet_interface_ip_map(self):
subnet_lookup = dict(
(netaddr.IPNetwork(subnet.cidr), subnet.id)
subnet_lookup = {
netaddr.IPNetwork(subnet.cidr): subnet.id
for subnet in self.network.subnets
)
}
retval = {}
@ -1448,7 +1452,7 @@ class Dnsmasq(DhcpLocalProcess):
"""
isolated_subnets = collections.defaultdict(lambda: True)
all_subnets = cls._get_all_subnets(network)
subnets = dict((subnet.id, subnet) for subnet in all_subnets)
subnets = {subnet.id: subnet for subnet in all_subnets}
for port in network.ports:
if port.device_owner not in constants.ROUTER_INTERFACE_OWNERS:
@ -1511,7 +1515,7 @@ class Dnsmasq(DhcpLocalProcess):
return any(isolated_subnets[s.id] for s in dhcp_subnets)
class DeviceManager(object):
class DeviceManager:
def __init__(self, conf, plugin):
self.conf = conf
@ -1575,7 +1579,7 @@ class DeviceManager(object):
subnet.cidr, gateway))
if is_old_gateway_not_in_subnet:
onlink = device.route.list_onlink_routes(ip_version)
existing_onlink_routes = set(r['cidr'] for r in onlink)
existing_onlink_routes = {r['cidr'] for r in onlink}
if gateway in existing_onlink_routes:
device.route.delete_route(gateway, scope='link')
@ -1635,7 +1639,7 @@ class DeviceManager(object):
# Compare what the subnets should be against what is already
# on the port.
dhcp_enabled_subnet_ids = set(dhcp_subnets)
port_subnet_ids = set(ip.subnet_id for ip in port.fixed_ips)
port_subnet_ids = {ip.subnet_id for ip in port.fixed_ips}
# If those differ, we need to call update.
if dhcp_enabled_subnet_ids != port_subnet_ids:
@ -1865,7 +1869,7 @@ class DeviceManager(object):
for fixed_ip in port.fixed_ips:
subnet = fixed_ip.subnet
net = netaddr.IPNetwork(subnet.cidr)
ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
ip_cidr = '{}/{}'.format(fixed_ip.ip_address, net.prefixlen)
ip_cidrs.append(ip_cidr)
if self.driver.use_gateway_ips:
@ -1877,7 +1881,7 @@ class DeviceManager(object):
gateway = subnet.gateway_ip
if gateway:
net = netaddr.IPNetwork(subnet.cidr)
ip_cidrs.append('%s/%s' % (gateway, net.prefixlen))
ip_cidrs.append('{}/{}'.format(gateway, net.prefixlen))
if self.conf.force_metadata or self.conf.enable_isolated_metadata:
ip_cidrs.append(constants.METADATA_CIDR)

View File

@ -70,12 +70,12 @@ exec neutron-pd-notify $1 {{ prefix_path }} {{ l3_agent_pid }}
class PDDibbler(pd_driver.PDDriverBase):
def __init__(self, router_id, subnet_id, ri_ifname):
super(PDDibbler, self).__init__(router_id, subnet_id, ri_ifname)
self.requestor_id = "%s:%s:%s" % (self.router_id,
self.subnet_id,
self.ri_ifname)
self.dibbler_client_working_area = "%s/%s" % (cfg.CONF.pd_confs,
self.requestor_id)
super().__init__(router_id, subnet_id, ri_ifname)
self.requestor_id = "{}:{}:{}".format(self.router_id,
self.subnet_id,
self.ri_ifname)
self.dibbler_client_working_area = "{}/{}".format(cfg.CONF.pd_confs,
self.requestor_id)
self.prefix_path = "%s/prefix" % self.dibbler_client_working_area
self.pid_path = "%s/client.pid" % self.dibbler_client_working_area
self.converted_subnet_id = self.subnet_id.replace('-', '')

View File

@ -38,7 +38,7 @@ agent_cfg.register_external_process_opts()
agent_cfg.register_process_monitor_opts(cfg.CONF)
class MonitoredProcess(object, metaclass=abc.ABCMeta):
class MonitoredProcess(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def active(self):
@ -76,7 +76,7 @@ class ProcessManager(MonitoredProcess):
self.service_pid_fname = 'pid'
self.service = DEFAULT_SERVICE_NAME
process_tag = '%s-%s' % (self.service, self.uuid)
process_tag = '{}-{}'.format(self.service, self.uuid)
self.cmd_addl_env = cmd_addl_env or {}
self.cmd_addl_env[PROCESS_TAG] = process_tag
@ -186,7 +186,7 @@ class ProcessManager(MonitoredProcess):
ServiceId = collections.namedtuple('ServiceId', ['uuid', 'service'])
class ProcessMonitor(object):
class ProcessMonitor:
def __init__(self, config, resource_type):
"""Handle multiple process managers and watch over all of them.

View File

@ -188,13 +188,13 @@ class LinuxInterfaceDriver(interface.LinuxInterfaceDriver,
on-link route list
"""
device = ip_lib.IPDevice(device_name, namespace=namespace)
new_onlink_cidrs = set(s['cidr'] for s in extra_subnets or [])
new_onlink_cidrs = {s['cidr'] for s in extra_subnets or []}
preserve_ips = set(preserve_ips if preserve_ips else [])
onlink = device.route.list_onlink_routes(constants.IP_VERSION_4)
if is_ipv6:
onlink += device.route.list_onlink_routes(constants.IP_VERSION_6)
existing_onlink_cidrs = set(r['cidr'] for r in onlink)
existing_onlink_cidrs = {r['cidr'] for r in onlink}
for route in new_onlink_cidrs - existing_onlink_cidrs:
LOG.debug('Adding onlink route (%s)', route)
@ -245,8 +245,8 @@ class LinuxInterfaceDriver(interface.LinuxInterfaceDriver,
"""Configure handling of IPv6 Router Advertisements on an
interface. See common/constants.py for possible values.
"""
cmd = ['net.ipv6.conf.%(dev)s.accept_ra=%(value)s' % {'dev': dev_name,
'value': value}]
cmd = ['net.ipv6.conf.{dev}.accept_ra={value}'.format(dev=dev_name,
value=value)]
ip_lib.sysctl(cmd, namespace=namespace)
@staticmethod
@ -314,7 +314,7 @@ class OVSInterfaceDriver(LinuxInterfaceDriver):
DEV_NAME_PREFIX = constants.TAP_DEVICE_PREFIX
def __init__(self, conf, **kwargs):
super(OVSInterfaceDriver, self).__init__(conf, **kwargs)
super().__init__(conf, **kwargs)
ovs_conf.register_ovs_agent_opts(self.conf)
if self.conf.ovs_use_veth:
self.DEV_NAME_PREFIX = 'ns-'

View File

@ -28,7 +28,7 @@ MAX_CONNTRACK_ZONES = 65535
ZONE_START = 4097
class IpConntrackUpdate(object):
class IpConntrackUpdate:
"""Encapsulates a conntrack update
An instance of this object carries the information necessary to
@ -58,7 +58,7 @@ def get_conntrack(get_rules_for_table_func, filtered_ports, unfiltered_ports,
return CONTRACK_MGRS[namespace]
class IpConntrackManager(object):
class IpConntrackManager:
"""Smart wrapper for ip conntrack."""
def __init__(self, get_rules_for_table_func, filtered_ports,
@ -257,7 +257,7 @@ class IpConntrackManager(object):
class OvsIpConntrackManager(IpConntrackManager):
def __init__(self, execute=None):
super(OvsIpConntrackManager, self).__init__(
super().__init__(
get_rules_for_table_func=None,
filtered_ports={}, unfiltered_ports={},
execute=execute, namespace=None, zone_per_port=False)

View File

@ -110,7 +110,7 @@ class DADFailed(AddressNotReady):
InvalidArgument = privileged.InvalidArgument
class SubProcessBase(object):
class SubProcessBase:
def __init__(self, namespace=None,
log_fail_as_error=True):
self.namespace = namespace
@ -155,7 +155,7 @@ class SubProcessBase(object):
class IPWrapper(SubProcessBase):
def __init__(self, namespace=None):
super(IPWrapper, self).__init__(namespace=namespace)
super().__init__(namespace=namespace)
self.netns = IpNetnsCommand(self)
def device(self, name):
@ -344,7 +344,7 @@ class IPWrapper(SubProcessBase):
class IPDevice(SubProcessBase):
def __init__(self, name, namespace=None, kind='link'):
super(IPDevice, self).__init__(namespace=namespace)
super().__init__(namespace=namespace)
self._name = name
self.kind = kind
self.link = IpLinkCommand(self)
@ -360,8 +360,8 @@ class IPDevice(SubProcessBase):
return self.name
def __repr__(self):
return "<IPDevice(name=%s, namespace=%s)>" % (self._name,
self.namespace)
return "<IPDevice(name={}, namespace={})>".format(self._name,
self.namespace)
def exists(self):
"""Return True if the device exists in the namespace."""
@ -441,7 +441,7 @@ class IPDevice(SubProcessBase):
self._name = name
class IpDeviceCommandBase(object):
class IpDeviceCommandBase:
def __init__(self, parent):
self._parent = parent
@ -639,7 +639,7 @@ class IpAddrCommand(IpDeviceCommandBase):
class IpRouteCommand(IpDeviceCommandBase):
def __init__(self, parent, table=None):
super(IpRouteCommand, self).__init__(parent)
super().__init__(parent)
self._table = table
def add_gateway(self, gateway, metric=None, table=None, scope='global'):
@ -693,7 +693,7 @@ class IpRouteCommand(IpDeviceCommandBase):
class IPRoute(SubProcessBase):
def __init__(self, namespace=None, table=None):
super(IPRoute, self).__init__(namespace=namespace)
super().__init__(namespace=namespace)
self.name = None
self.route = IpRouteCommand(self, table=table)
@ -743,7 +743,7 @@ class IpNeighCommand(IpDeviceCommandBase):
self.delete(entry['dst'], entry['lladdr'])
class IpNetnsCommand(object):
class IpNetnsCommand:
def __init__(self, parent):
self._parent = parent
@ -1286,7 +1286,7 @@ def _parse_ip_rule(rule, ip_version):
fwmark = rule['attrs'].get('FRA_FWMARK')
if fwmark:
fwmask = rule['attrs'].get('FRA_FWMASK')
parsed_rule['fwmark'] = '{0:#x}/{1:#x}'.format(fwmark, fwmask)
parsed_rule['fwmark'] = f'{fwmark:#x}/{fwmask:#x}'
iifname = rule['attrs'].get('FRA_IIFNAME')
if iifname:
parsed_rule['iif'] = iifname
@ -1615,7 +1615,7 @@ def list_ip_routes(namespace, ip_version, scope=None, via=None, table=None,
for route in routes:
cidr = linux_utils.get_attr(route, 'RTA_DST')
if cidr:
cidr = '%s/%s' % (cidr, route['dst_len'])
cidr = '{}/{}'.format(cidr, route['dst_len'])
else:
cidr = constants.IP_ANY[ip_version]
table = int(linux_utils.get_attr(route, 'RTA_TABLE'))

View File

@ -24,7 +24,7 @@ SWAP_SUFFIX = '-n'
IPSET_NAME_MAX_LENGTH = 31 - len(SWAP_SUFFIX)
class IpsetManager(object):
class IpsetManager:
"""Smart wrapper for ipset.
Keeps track of ip addresses per set, using bulk
@ -120,10 +120,10 @@ class IpsetManager(object):
def _refresh_set(self, set_name, member_ips, ethertype):
new_set_name = set_name + SWAP_SUFFIX
set_type = self._get_ipset_set_type(ethertype)
process_input = ["create %s hash:net family %s" % (new_set_name,
set_type)]
process_input = ["create {} hash:net family {}".format(new_set_name,
set_type)]
for ip in member_ips:
process_input.append("add %s %s" % (new_set_name, ip))
process_input.append("add {} {}".format(new_set_name, ip))
self._restore_sets(process_input)
self._swap_sets(new_set_name, set_name)

View File

@ -429,8 +429,8 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
for dev, match in ((br_dev, match_physdev), (br_dev, match_interface),
(port_dev, match_physdev)):
match = match % dev
rule = '%s -m comment --comment "%s" -j CT %s' % (match, comment,
conntrack)
rule = '{} -m comment --comment "{}" -j CT {}'.format(
match, comment, conntrack)
rules.append(rule)
return rules
@ -853,7 +853,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
args += ['--%s' % direction, '%s' % port_range_min]
else:
args += ['-m', 'multiport', '--%ss' % direction,
'%s:%s' % (port_range_min, port_range_max)]
'{}:{}'.format(port_range_min, port_range_max)]
return args
def _ip_prefix_arg(self, direction, ip_prefix):
@ -872,7 +872,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
def _port_chain_name(self, port, direction):
return iptables_manager.get_chain_name(
'%s%s' % (CHAIN_NAME_PREFIX[direction], port['device'][3:]))
'{}{}'.format(CHAIN_NAME_PREFIX[direction], port['device'][3:]))
def filter_defer_apply_on(self):
if not self._defer_apply:
@ -1032,7 +1032,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
def _get_sg_members(self, sg_info, sg_id, ethertype):
ip_mac_addresses = sg_info.get(sg_id, {}).get(ethertype, [])
return set([ip_mac[0] for ip_mac in ip_mac_addresses])
return {ip_mac[0] for ip_mac in ip_mac_addresses}
def filter_defer_apply_off(self):
if self._defer_apply:
@ -1054,12 +1054,11 @@ class OVSHybridIptablesFirewallDriver(IptablesFirewallDriver):
def _port_chain_name(self, port, direction):
return iptables_manager.get_chain_name(
'%s%s' % (CHAIN_NAME_PREFIX[direction], port['device']))
'{}{}'.format(CHAIN_NAME_PREFIX[direction], port['device']))
def _get_br_device_name(self, port):
return ('qvb' + port['device'])[:constants.LINUX_DEV_LEN]
def _get_device_name(self, port):
device_name = super(
OVSHybridIptablesFirewallDriver, self)._get_device_name(port)
device_name = super()._get_device_name(port)
return get_hybrid_port_name(device_name)

View File

@ -77,12 +77,12 @@ def comment_rule(rule, comment):
comment = '-m comment --comment "%s"' % comment
if rule.startswith('-j'):
# this is a jump only rule so we just put the comment first
return '%s %s' % (comment, rule)
return '{} {}'.format(comment, rule)
try:
jpos = rule.index(' -j ')
return ' '.join((rule[:jpos], comment, rule[jpos + 1:]))
except ValueError:
return '%s %s' % (rule, comment)
return '{} {}'.format(rule, comment)
def get_chain_name(chain_name, wrap=True):
@ -92,7 +92,7 @@ def get_chain_name(chain_name, wrap=True):
return chain_name[:constants.MAX_IPTABLES_CHAIN_LEN_NOWRAP]
class IptablesRule(object):
class IptablesRule:
"""An iptables rule.
You shouldn't need to use this class directly, it's only used by
@ -121,16 +121,16 @@ class IptablesRule(object):
def __str__(self):
if self.wrap:
chain = '%s-%s' % (self.wrap_name, self.chain)
chain = '{}-{}'.format(self.wrap_name, self.chain)
else:
chain = self.chain
rule = '-A %s %s' % (chain, self.rule)
rule = '-A {} {}'.format(chain, self.rule)
# If self.rule is '' the above will cause a trailing space, which
# could cause us to not match on save/restore, so strip it now.
return comment_rule(rule.strip(), self.comment)
class IptablesTable(object):
class IptablesTable:
"""An iptables table."""
def __init__(self, binary_name=binary_name):
@ -195,7 +195,7 @@ class IptablesTable(object):
self.remove_rules += [str(r) for r in self.rules
if r.chain == name or jump_snippet in r.rule]
else:
jump_snippet = '-j %s-%s' % (self.wrap_name, name)
jump_snippet = '-j {}-{}'.format(self.wrap_name, name)
# Remove rules from list that have a matching chain name or
# a matching jump chain
@ -227,7 +227,7 @@ class IptablesTable(object):
def _wrap_target_chain(self, s, wrap):
if s.startswith('$'):
s = ('%s-%s' % (self.wrap_name, get_chain_name(s[1:], wrap)))
s = ('{}-{}'.format(self.wrap_name, get_chain_name(s[1:], wrap)))
return s
@ -277,7 +277,7 @@ class IptablesTable(object):
self.rules.remove(rule)
class IptablesManager(object):
class IptablesManager:
"""Wrapper for iptables.
See IptablesTable for some usage docs
@ -580,7 +580,7 @@ class IptablesManager(object):
s += [('ip6tables', self.ipv6)]
all_commands = [] # variable to keep track all commands for return val
for cmd, tables in s:
args = ['%s-save' % (cmd,)]
args = ['{}-save'.format(cmd)]
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
try:
@ -624,7 +624,7 @@ class IptablesManager(object):
# always end with a new line
commands.append('')
args = ['%s-restore' % (cmd,), '-n']
args = ['{}-restore'.format(cmd), '-n']
if self.namespace:
args = ['ip', 'netns', 'exec', self.namespace] + args
@ -684,7 +684,7 @@ class IptablesManager(object):
line.strip() not in rules]
# generate our list of chain names
our_chains = [':%s-%s' % (self.wrap_name, name) for name in chains]
our_chains = [':{}-{}'.format(self.wrap_name, name) for name in chains]
# the unwrapped chains (e.g. neutron-filter-top) may already exist in
# the new_filter since they aren't marked by the wrap_name so we only

View File

@ -65,7 +65,7 @@ def get_free_range(parent_range, excluded_ranges, size=PRIMARY_VIP_RANGE_SIZE):
free_cidrs = netaddr.IPSet([parent_range]) - netaddr.IPSet(excluded_ranges)
for cidr in free_cidrs.iter_cidrs():
if cidr.prefixlen <= size:
return '%s/%s' % (cidr.network, size)
return '{}/{}'.format(cidr.network, size)
raise ValueError(_('Network of size %(size)s, from IP range '
'%(parent_range)s excluding IP ranges '
@ -82,7 +82,7 @@ class InvalidInstanceStateException(exceptions.NeutronException):
def __init__(self, **kwargs):
if 'valid_states' not in kwargs:
kwargs['valid_states'] = ', '.join(VALID_STATES)
super(InvalidInstanceStateException, self).__init__(**kwargs)
super().__init__(**kwargs)
class InvalidAuthenticationTypeException(exceptions.NeutronException):
@ -92,10 +92,10 @@ class InvalidAuthenticationTypeException(exceptions.NeutronException):
def __init__(self, **kwargs):
if 'valid_auth_types' not in kwargs:
kwargs['valid_auth_types'] = ', '.join(VALID_AUTH_TYPES)
super(InvalidAuthenticationTypeException, self).__init__(**kwargs)
super().__init__(**kwargs)
class KeepalivedVipAddress(object):
class KeepalivedVipAddress:
"""A virtual address entry of a keepalived configuration."""
def __init__(self, ip_address, interface_name, scope=None, track=True):
@ -109,13 +109,13 @@ class KeepalivedVipAddress(object):
self.ip_address == other.ip_address)
def __str__(self):
return '[%s, %s, %s, %s]' % (self.ip_address,
self.interface_name,
self.scope,
self.track)
return '[{}, {}, {}, {}]'.format(self.ip_address,
self.interface_name,
self.scope,
self.track)
def build_config(self):
result = '%s dev %s' % (self.ip_address, self.interface_name)
result = '{} dev {}'.format(self.ip_address, self.interface_name)
if self.scope:
result += ' scope %s' % self.scope
if not self.track and _is_keepalived_use_no_track_supported():
@ -123,7 +123,7 @@ class KeepalivedVipAddress(object):
return result
class KeepalivedVirtualRoute(object):
class KeepalivedVirtualRoute:
"""A virtual route entry of a keepalived configuration."""
def __init__(self, destination, nexthop, interface_name=None,
@ -152,7 +152,7 @@ class KeepalivedVirtualRoute(object):
return output
class KeepalivedInstanceRoutes(object):
class KeepalivedInstanceRoutes:
def __init__(self):
self.gateway_routes = []
self.extra_routes = []
@ -181,7 +181,7 @@ class KeepalivedInstanceRoutes(object):
[' }'])
class KeepalivedInstance(object):
class KeepalivedInstance:
"""Instance section of a keepalived configuration."""
def __init__(self, state, interface, vrouter_id, ha_cidrs,
@ -264,7 +264,8 @@ class KeepalivedInstance(object):
ip = (netaddr.IPNetwork(self.primary_vip_range).network +
self.vrouter_id)
return str(netaddr.IPNetwork('%s/%s' % (ip, PRIMARY_VIP_RANGE_SIZE)))
return str(netaddr.IPNetwork('{}/{}'.format(
ip, PRIMARY_VIP_RANGE_SIZE)))
def _build_vips_config(self):
# NOTE(amuller): The primary VIP must be consistent in order to avoid
@ -349,7 +350,7 @@ class KeepalivedInstance(object):
return config
class KeepalivedConf(object):
class KeepalivedConf:
"""A keepalived configuration."""
def __init__(self):
@ -384,7 +385,7 @@ class KeepalivedConf(object):
return '\n'.join(self.build_config())
class KeepalivedManager(object):
class KeepalivedManager:
"""Wrapper for keepalived.
This wrapper permits to write keepalived config files, to start/restart
@ -436,7 +437,7 @@ class KeepalivedManager(object):
try:
with open(config_path) as conf:
return conf.read()
except (OSError, IOError) as e:
except OSError as e:
if e.errno != errno.ENOENT:
raise
@ -535,7 +536,7 @@ class KeepalivedTrackScript(KeepalivedConf):
def build_config_preamble(self):
config = ['',
'vrrp_script %s_%s {' % (HEALTH_CHECK_NAME, self.vr_id),
'vrrp_script {}_{} {{'.format(HEALTH_CHECK_NAME, self.vr_id),
' script "%s"' % self._get_script_location(),
' interval %s' % self.interval,
' fall 2',
@ -557,7 +558,7 @@ class KeepalivedTrackScript(KeepalivedConf):
return ''
config = [' track_script {',
' %s_%s' % (HEALTH_CHECK_NAME, self.vr_id),
' {}_{}'.format(HEALTH_CHECK_NAME, self.vr_id),
' }']
return config
@ -575,7 +576,7 @@ class KeepalivedTrackScript(KeepalivedConf):
6: 'ping6',
}.get(netaddr.IPAddress(ip_addr).version)
return '%s -c 1 -w 1 %s 1>/dev/null || exit 1' % (cmd, ip_addr)
return '{} -c 1 -w 1 {} 1>/dev/null || exit 1'.format(cmd, ip_addr)
def _check_ip_assigned(self):
cmd = 'ip a | grep %s || exit 0'

View File

@ -103,8 +103,8 @@ class FloatingIPTcCommandBase(ip_lib.IPDevice):
return filterids
def _add_filter(self, qdisc_id, direction, ip, rate, burst):
rate_value = "%s%s" % (rate, tc_lib.BW_LIMIT_UNIT)
burst_value = "%s%s" % (
rate_value = "{}{}".format(rate, tc_lib.BW_LIMIT_UNIT)
burst_value = "{}{}".format(
tc_lib.TcCommand.get_ingress_qdisc_burst_value(rate, burst),
tc_lib.BURST_UNIT
)

View File

@ -20,7 +20,7 @@ import eventlet
from neutron.agent.common import async_process
class OFEvent(object):
class OFEvent:
def __init__(self, event_type, flow):
self.event_type = event_type
@ -46,9 +46,9 @@ class OFMonitor(async_process.AsyncProcess):
def __init__(self, bridge_name, namespace=None, respawn_interval=None,
start=True):
cmd = ['ovs-ofctl', 'monitor', bridge_name, 'watch:', '--monitor']
super(OFMonitor, self).__init__(cmd, run_as_root=True,
respawn_interval=respawn_interval,
namespace=namespace)
super().__init__(cmd, run_as_root=True,
respawn_interval=respawn_interval,
namespace=namespace)
if start:
self.start()
@ -77,8 +77,8 @@ class OFMonitor(async_process.AsyncProcess):
def start(self, **kwargs):
if not self._is_running:
super(OFMonitor, self).start(block=True)
super().start(block=True)
def stop(self, **kwargs):
if self._is_running:
super(OFMonitor, self).stop(block=True)
super().stop(block=True)

View File

@ -60,7 +60,7 @@ def _replace_register(flow_params, register_number, register_value):
try:
reg_port = flow_params[register_value]
del flow_params[register_value]
flow_params['reg{:d}'.format(register_number)] = reg_port
flow_params[f'reg{register_number:d}'] = reg_port
except KeyError:
pass
@ -126,7 +126,7 @@ def get_tag_from_other_config(bridge, port_name):
port_name=port_name, other_config=other_config)
class SecurityGroup(object):
class SecurityGroup:
def __init__(self, id_):
self.id = id_
self.raw_rules = []
@ -161,7 +161,7 @@ class SecurityGroup(object):
return self.members.get(ethertype, [])
class OFPort(object):
class OFPort:
def __init__(self, port_dict, ovs_port, vlan_tag, segment_id=None,
network_type=None, physical_network=None):
self.id = port_dict['device']
@ -213,7 +213,7 @@ class OFPort(object):
self.neutron_port_dict = port_dict.copy()
class SGPortMap(object):
class SGPortMap:
def __init__(self):
self.ports = {}
self.sec_groups = {}
@ -262,7 +262,7 @@ class SGPortMap(object):
sec_group.members = members
class ConjIdMap(object):
class ConjIdMap:
"""Handle conjunction ID allocations and deallocations."""
CONJ_ID_BLOCK_SIZE = 8
@ -270,7 +270,7 @@ class ConjIdMap(object):
def __new__(cls, int_br):
if not hasattr(cls, '_instance'):
cls._instance = super(ConjIdMap, cls).__new__(cls)
cls._instance = super().__new__(cls)
return cls._instance
def __init__(self, int_br):
@ -348,7 +348,7 @@ class ConjIdMap(object):
return a list of (remote_sg_id, conj_id), which are no longer
in use.
"""
result = set([])
result = set()
for k in list(self.id_map.keys()):
if sg_id in k[0:2]:
conj_id = self.id_map.pop(k)
@ -367,7 +367,7 @@ class ConjIdMap(object):
return result
class ConjIPFlowManager(object):
class ConjIPFlowManager:
"""Manage conj_id allocation and remote securitygroups derived
conjunction flows.
@ -614,7 +614,7 @@ class OVSFirewallDriver(firewall.FirewallDriver):
dl_type = kwargs.get('dl_type')
create_reg_numbers(kwargs)
if isinstance(dl_type, int):
kwargs['dl_type'] = "0x{:04x}".format(dl_type)
kwargs['dl_type'] = f"0x{dl_type:04x}"
if self._update_cookie:
kwargs['cookie'] = self._update_cookie
if self._deferred:
@ -1279,7 +1279,7 @@ class OVSFirewallDriver(firewall.FirewallDriver):
priority=12,
dl_dst=mac,
reg_net=vlan_tag,
actions='output:{:d}'.format(dst_port)
actions=f'output:{dst_port:d}'
)
# For packets from patch ports.
self._add_flow(
@ -1288,7 +1288,7 @@ class OVSFirewallDriver(firewall.FirewallDriver):
priority=12,
dl_dst=mac,
dl_vlan=vlan_tag,
actions='strip_vlan,output:{:d}'.format(dst_port)
actions=f'strip_vlan,output:{dst_port:d}'
)
# The former flow may not match, that means the destination port is
@ -1410,7 +1410,7 @@ class OVSFirewallDriver(firewall.FirewallDriver):
dl_type=lib_const.ETHERTYPE_IPV6,
nw_proto=lib_const.PROTO_NUM_IPV6_ICMP,
icmp_type=icmp_type,
actions='output:{:d}'.format(port.ofport)
actions=f'output:{port.ofport:d}'
)
def _initialize_ingress(self, port):
@ -1420,7 +1420,7 @@ class OVSFirewallDriver(firewall.FirewallDriver):
priority=100,
dl_type=lib_const.ETHERTYPE_ARP,
reg_port=port.ofport,
actions='output:{:d}'.format(port.ofport)
actions=f'output:{port.ofport:d}'
)
# Allow custom ethertypes
@ -1430,7 +1430,7 @@ class OVSFirewallDriver(firewall.FirewallDriver):
priority=100,
dl_type=permitted_ethertype,
reg_port=port.ofport,
actions='output:{:d}'.format(port.ofport))
actions=f'output:{port.ofport:d}')
self._initialize_ingress_ipv6_icmp(port)
@ -1446,7 +1446,7 @@ class OVSFirewallDriver(firewall.FirewallDriver):
nw_proto=lib_const.PROTO_NUM_UDP,
tp_src=src_port,
tp_dst=dst_port,
actions='output:{:d}'.format(port.ofport)
actions=f'output:{port.ofport:d}'
)
# Track untracked
@ -1466,7 +1466,7 @@ class OVSFirewallDriver(firewall.FirewallDriver):
ct_state=ovsfw_consts.OF_STATE_TRACKED,
priority=80,
reg_port=port.ofport,
actions='resubmit(,{:d})'.format(ovs_consts.RULES_INGRESS_TABLE)
actions=f'resubmit(,{ovs_consts.RULES_INGRESS_TABLE:d})'
)
def _initialize_tracked_ingress(self, port):
@ -1495,10 +1495,10 @@ class OVSFirewallDriver(firewall.FirewallDriver):
# offload case. In case the explicitly_egress_direct is used the
# pipeline don't contain action NORMAL so we don't have flood rule
# issue.
actions = 'output:{:d}'.format(port.ofport)
actions = f'output:{port.ofport:d}'
if (self.int_br.br.is_hw_offload_enabled and
not cfg.CONF.AGENT.explicitly_egress_direct):
actions = 'mod_vlan_vid:{:d},normal'.format(port.vlan_tag)
actions = f'mod_vlan_vid:{port.vlan_tag:d},normal'
# Allow established and related connections
for state in (ovsfw_consts.OF_STATE_ESTABLISHED_REPLY,
ovsfw_consts.OF_STATE_RELATED):

View File

@ -40,7 +40,7 @@ def is_bridge_cleaned(bridge):
return other_config.get(Helper.CLEANED_METADATA, '').lower() == 'true'
class Helper(object):
class Helper:
"""Helper to avoid loading firewall driver.
The main purpose is to avoid loading iptables driver for cases where no

View File

@ -210,7 +210,7 @@ def populate_flow_common(direction, flow_template, port):
"""Initialize common flow fields."""
if direction == n_consts.INGRESS_DIRECTION:
flow_template['table'] = ovs_consts.RULES_INGRESS_TABLE
flow_template['actions'] = "output:{:d}".format(port.ofport)
flow_template['actions'] = f"output:{port.ofport:d}"
elif direction == n_consts.EGRESS_DIRECTION:
flow_template['table'] = ovs_consts.RULES_EGRESS_TABLE
# Traffic can be both ingress and egress, check that no ingress rules
@ -241,10 +241,10 @@ def create_port_range_flows(flow_template, rule):
if protocol is None:
return []
flows = []
src_port_match = '{:s}_src'.format(protocol)
src_port_match = f'{protocol:s}_src'
src_port_min = rule.get('source_port_range_min')
src_port_max = rule.get('source_port_range_max')
dst_port_match = '{:s}_dst'.format(protocol)
dst_port_match = f'{protocol:s}_dst'
dst_port_min = rule.get('port_range_min')
dst_port_max = rule.get('port_range_max')

View File

@ -32,7 +32,7 @@ from neutron.common import utils
LOG = logging.getLogger(__name__)
class PrefixDelegation(object):
class PrefixDelegation:
def __init__(self, context, pmon, intf_driver, notifier, pd_update_cb,
agent_conf):
self.context = context
@ -394,7 +394,7 @@ def update_router(resource, event, l3_agent, payload):
router['ns_name'] = updated_router.get_gw_ns_name()
class PDInfo(object):
class PDInfo:
"""A class to simplify storing and passing of information relevant to
Prefix Delegation operations for a given subnet.
"""

View File

@ -20,7 +20,7 @@ from neutron.conf.agent import common as agent_conf
agent_conf.register_pddriver_opts()
class PDDriverBase(object, metaclass=abc.ABCMeta):
class PDDriverBase(metaclass=abc.ABCMeta):
def __init__(self, router_id, subnet_id, ri_ifname):
self.router_id = router_id

View File

@ -80,7 +80,7 @@ CONFIG_TEMPLATE = jinja2.Template("""interface {{ interface_name }}
""")
class DaemonMonitor(object):
class DaemonMonitor:
"""Manage the data and state of an radvd process."""
def __init__(self, router_id, router_ns, process_monitor, dev_name_helper,

View File

@ -203,7 +203,7 @@ class TcCommand(ip_lib.IPDevice):
def __init__(self, name, kernel_hz, namespace=None):
if kernel_hz <= 0:
raise InvalidKernelHzValue(value=kernel_hz)
super(TcCommand, self).__init__(name, namespace=namespace)
super().__init__(name, namespace=namespace)
self.kernel_hz = kernel_hz
@staticmethod

View File

@ -43,7 +43,7 @@ from neutron.privileged.agent.linux import utils as priv_utils
LOG = logging.getLogger(__name__)
class RootwrapDaemonHelper(object):
class RootwrapDaemonHelper:
__client = None
__lock = threading.Lock()
@ -237,18 +237,18 @@ def _get_conf_base(cfg_root, uuid, ensure_conf_dir):
def get_conf_file_name(cfg_root, uuid, cfg_file, ensure_conf_dir=False):
"""Returns the file name for a given kind of config file."""
conf_base = _get_conf_base(cfg_root, uuid, ensure_conf_dir)
return "%s.%s" % (conf_base, cfg_file)
return "{}.{}".format(conf_base, cfg_file)
def get_value_from_file(filename, converter=None):
try:
with open(filename, 'r') as f:
with open(filename) as f:
try:
return converter(f.read()) if converter else f.read()
except ValueError:
LOG.error('Unable to convert value in %s', filename)
except IOError as error:
except OSError as error:
LOG.debug('Unable to access %(filename)s; Error: %(error)s',
{'filename': filename, 'error': error})
@ -318,9 +318,9 @@ def get_cmdline_from_pid(pid):
# NOTE(jh): Even after the above check, the process may terminate
# before the open below happens
try:
with open('/proc/%s/cmdline' % pid, 'r') as f:
with open('/proc/%s/cmdline' % pid) as f:
cmdline = f.readline().split('\0')[:-1]
except IOError:
except OSError:
return []
# NOTE(slaweq): sometimes it may happen that values in
@ -466,8 +466,8 @@ class UnixDomainWSGIServer(wsgi.Server):
self._socket = None
self._launcher = None
self._server = None
super(UnixDomainWSGIServer, self).__init__(name, disable_ssl=True,
num_threads=num_threads)
super().__init__(name, disable_ssl=True,
num_threads=num_threads)
def start(self, application, file_socket, workers, backlog, mode=None):
self._socket = eventlet.listen(file_socket,

View File

@ -46,7 +46,7 @@ listen listener
"""
class HaproxyConfiguratorBase(object, metaclass=abc.ABCMeta):
class HaproxyConfiguratorBase(metaclass=abc.ABCMeta):
PROXY_CONFIG_DIR = None
HEADER_CONFIG_TEMPLATE = None
@ -75,7 +75,7 @@ class HaproxyConfiguratorBase(object, metaclass=abc.ABCMeta):
# starts with "haproxy" then things will get logged to
# /var/log/haproxy.log on Debian distros, instead of to syslog.
uuid = network_id or router_id
self.log_tag = "haproxy-{}-{}".format(METADATA_SERVICE_NAME, uuid)
self.log_tag = f"haproxy-{METADATA_SERVICE_NAME}-{uuid}"
self._haproxy_cfg = ''
self._resource_id = None
self._create_config()
@ -129,7 +129,7 @@ class HaproxyConfiguratorBase(object, metaclass=abc.ABCMeta):
}
if self.host_v6 and self.bind_interface:
cfg_info['bind_v6_line'] = (
'bind %s:%s interface %s' % (
'bind {}:{} interface {}'.format(
self.host_v6, self.port, self.bind_interface)
)
# If using the network ID, delete any spurious router ID that might
@ -198,7 +198,7 @@ class HaproxyConfiguratorBase(object, metaclass=abc.ABCMeta):
linux_utils.delete_if_exists(cfg_path, run_as_root=True)
class MetadataDriverBase(object, metaclass=abc.ABCMeta):
class MetadataDriverBase(metaclass=abc.ABCMeta):
monitors = {}
@staticmethod

View File

@ -39,7 +39,7 @@ MODE_MAP = {
}
class MetadataProxyHandlerBase(object, metaclass=abc.ABCMeta):
class MetadataProxyHandlerBase(metaclass=abc.ABCMeta):
NETWORK_ID_HEADER = None
ROUTER_ID_HEADER = None
@ -196,7 +196,7 @@ class MetadataProxyHandlerBase(object, metaclass=abc.ABCMeta):
resp.status_code)
class UnixDomainMetadataProxyBase(object, metaclass=abc.ABCMeta):
class UnixDomainMetadataProxyBase(metaclass=abc.ABCMeta):
def __init__(self, conf):
self.conf = conf

View File

@ -113,7 +113,7 @@ class OVNAgentExtension(extension.AgentExtension, metaclass=abc.ABCMeta):
pass
class OVNAgentExtensionAPI(object):
class OVNAgentExtensionAPI:
"""Implements the OVN Neutron Agent API"""
def __init__(self):

View File

@ -335,7 +335,7 @@ class SbGlobalUpdateEvent(_OVNExtensionEvent, row_event.RowEvent):
def __init__(self, agent):
table = 'SB_Global'
events = (self.ROW_UPDATE,)
super(SbGlobalUpdateEvent, self).__init__(events, table, None)
super().__init__(events, table, None)
self._agent = agent
self.event_name = self.__class__.__name__
self.first_run = True
@ -366,7 +366,7 @@ class SbGlobalUpdateEvent(_OVNExtensionEvent, row_event.RowEvent):
timer.start()
class MetadataAgent(object):
class MetadataAgent:
def __init__(self, conf):
self._conf = conf
@ -531,11 +531,11 @@ class MetadataAgent(object):
ns.decode('utf-8') if isinstance(ns, bytes) else ns
for ns in ip_lib.list_network_namespaces())
net_port_bindings = self.get_networks_port_bindings()
metadata_namespaces = set(
metadata_namespaces = {
self._get_namespace_name(
ovn_utils.get_network_name_from_datapath(datapath))
for datapath in (pb.datapath for pb in net_port_bindings)
)
}
unused_namespaces = [ns for ns in system_namespaces if
ns.startswith(NS_PREFIX) and
ns not in metadata_namespaces]

View File

@ -41,11 +41,11 @@ class MetadataAgentOvnSbIdl(ovsdb_monitor.OvnIdl):
for table in tables:
helper.register_table(table)
try:
super(MetadataAgentOvnSbIdl, self).__init__(
super().__init__(
None, connection_string, helper, leader_only=False)
except TypeError:
# TODO(twilson) We can remove this when we require ovs>=2.12.0
super(MetadataAgentOvnSbIdl, self).__init__(
super().__init__(
None, connection_string, helper)
if chassis:
for table in set(tables).intersection({'Chassis',
@ -69,7 +69,7 @@ class MetadataAgentOvnSbIdl(ovsdb_monitor.OvnIdl):
pass
class MetadataAgentOvsIdl(object):
class MetadataAgentOvsIdl:
def start(self):
connection_string = config.cfg.CONF.ovs.ovsdb_connection

View File

@ -43,7 +43,7 @@ def api_factory():
class OvsCleanup(command.BaseCommand):
def __init__(self, api, bridge, all_ports=False):
super(OvsCleanup, self).__init__(api)
super().__init__(api)
self.bridge = bridge
self.all_ports = all_ports

View File

@ -64,8 +64,8 @@ class BridgeCreateEvent(idl_event.RowEvent):
def __init__(self, agent):
self.agent = agent
table = 'Bridge'
super(BridgeCreateEvent, self).__init__((self.ROW_CREATE, ),
table, None)
super().__init__((self.ROW_CREATE, ),
table, None)
self.event_name = 'BridgeCreateEvent'
def run(self, event, row, old):
@ -83,7 +83,7 @@ class OvsIdl(idl.Idl):
configure_ssl_conn()
helper = self._get_ovsdb_helper(self._ovsdb_connection)
helper.register_all()
super(OvsIdl, self).__init__(self._ovsdb_connection, helper)
super().__init__(self._ovsdb_connection, helper)
self.notify_handler = ovsdb_event.RowEventHandler()
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=0.02),
@ -106,7 +106,7 @@ class OvsIdl(idl.Idl):
class OvsIdlMonitor(OvsIdl):
def __init__(self):
super(OvsIdlMonitor, self).__init__()
super().__init__()
self._lock = threading.Lock()
self._bridges_to_monitor = []
self._bridges_added_list = []

View File

@ -28,7 +28,7 @@ LOG = logging.getLogger(__name__)
objects.register_objects()
class RemoteResourceCache(object):
class RemoteResourceCache:
"""Retrieves and stashes logical resources in their OVO format.
This is currently only compatible with OVO objects that have an ID.
@ -233,7 +233,7 @@ class RemoteResourceCache(object):
return changed
class RemoteResourceWatcher(object):
class RemoteResourceWatcher:
"""Converts RPC callback notifications to local registry notifications.
This allows a constructor to listen for RPC callbacks for a given

View File

@ -63,7 +63,7 @@ def create_consumers(endpoints, prefix, topic_details, start_listening=True):
topic_name = topics.get_topic_name(prefix, topic, operation)
connection.create_consumer(topic_name, endpoints, fanout=True)
if node_name:
node_topic_name = '%s.%s' % (topic_name, node_name)
node_topic_name = '{}.{}'.format(topic_name, node_name)
connection.create_consumer(node_topic_name,
endpoints,
fanout=False)
@ -72,7 +72,7 @@ def create_consumers(endpoints, prefix, topic_details, start_listening=True):
return connection
class PluginReportStateAPI(object):
class PluginReportStateAPI:
"""RPC client used to report state back to plugin.
This class implements the client side of an rpc interface. The server side
@ -105,7 +105,7 @@ class PluginReportStateAPI(object):
return method(context, 'report_state', **kwargs)
class PluginApi(object):
class PluginApi:
"""Agent side of the rpc API.
API version history:
@ -223,7 +223,7 @@ class CacheBackedPluginApi(PluginApi):
resources.ADDRESSGROUP]
def __init__(self, *args, **kwargs):
super(CacheBackedPluginApi, self).__init__(*args, **kwargs)
super().__init__(*args, **kwargs)
self.remote_resource_cache = None
self._create_cache_for_l2_agent()
@ -280,7 +280,7 @@ class CacheBackedPluginApi(PluginApi):
"""
is_delete = event == callback_events.AFTER_DELETE
suffix = 'delete' if is_delete else 'update'
method = "%s_%s" % (rtype, suffix)
method = "{}_{}".format(rtype, suffix)
host_with_activation = None
host_with_deactivation = None
if is_delete or rtype != callback_resources.PORT:

View File

@ -66,7 +66,7 @@ def disable_security_group_extension_by_config(aliases):
_disable_extension(sg_rules_default_sg_def.ALIAS, aliases)
class SecurityGroupAgentRpc(object):
class SecurityGroupAgentRpc:
"""Enables SecurityGroup agent support in agent implementations."""
def __init__(self, context, plugin_rpc, local_vlan_map=None,

View File

@ -126,8 +126,8 @@ def get_previous_link(request, items, id_key):
marker = items[0][id_key]
params['marker'] = marker
params['page_reverse'] = True
return "%s?%s" % (prepare_url(get_path_url(request)),
urllib.parse.urlencode(params))
return "{}?{}".format(prepare_url(get_path_url(request)),
urllib.parse.urlencode(params))
def get_next_link(request, items, id_key):
@ -137,8 +137,8 @@ def get_next_link(request, items, id_key):
marker = items[-1][id_key]
params['marker'] = marker
params.pop('page_reverse', None)
return "%s?%s" % (prepare_url(get_path_url(request)),
urllib.parse.urlencode(params))
return "{}?{}".format(prepare_url(get_path_url(request)),
urllib.parse.urlencode(params))
def prepare_url(orig_url):
@ -233,8 +233,8 @@ def get_sorts(request, attr_info):
msg = _("The number of sort_keys and sort_dirs must be same")
raise exc.HTTPBadRequest(explanation=msg)
valid_dirs = [constants.SORT_DIRECTION_ASC, constants.SORT_DIRECTION_DESC]
valid_sort_keys = set(attr for attr, schema in attr_info.items()
if schema.get('is_sort_key', False))
valid_sort_keys = {attr for attr, schema in attr_info.items()
if schema.get('is_sort_key', False)}
absent_keys = [x for x in sort_keys if x not in valid_sort_keys]
if absent_keys:
msg = _("%s is invalid attribute for sort_keys") % absent_keys
@ -291,7 +291,7 @@ def is_filter_validation_supported(plugin):
return getattr(plugin, filter_validation_attr_name, False)
class PaginationHelper(object):
class PaginationHelper:
def __init__(self, request, primary_key='id'):
self.request = request
@ -313,7 +313,7 @@ class PaginationHelper(object):
class PaginationEmulatedHelper(PaginationHelper):
def __init__(self, request, primary_key='id'):
super(PaginationEmulatedHelper, self).__init__(request, primary_key)
super().__init__(request, primary_key)
self.limit, self.marker = get_limit_and_marker(request)
self.page_reverse = get_page_reverse(request)
@ -375,7 +375,7 @@ class NoPaginationHelper(PaginationHelper):
pass
class SortingHelper(object):
class SortingHelper:
def __init__(self, request, attr_info):
pass
@ -393,7 +393,7 @@ class SortingHelper(object):
class SortingEmulatedHelper(SortingHelper):
def __init__(self, request, attr_info):
super(SortingEmulatedHelper, self).__init__(request, attr_info)
super().__init__(request, attr_info)
self.sort_dict = get_sorts(request, attr_info)
def update_fields(self, original_fields, fields_to_add):
@ -450,8 +450,8 @@ def convert_exception_to_http_exc(e, faults, language):
# all error codes are the same so we can maintain the code
# and just concatenate the bodies
joined_msg = "\n".join(
(jsonutils.loads(c.body)['NeutronError']['message']
for c in converted_exceptions))
jsonutils.loads(c.body)['NeutronError']['message']
for c in converted_exceptions)
new_body = jsonutils.loads(converted_exceptions[0].body)
new_body['NeutronError']['message'] = joined_msg
converted_exceptions[0].body = serializer.serialize(new_body)
@ -463,7 +463,7 @@ def convert_exception_to_http_exc(e, faults, language):
inner_error_strings = []
for c in converted_exceptions:
c_body = jsonutils.loads(c.body)
err = ('HTTP %s %s: %s' % (
err = ('HTTP {} {}: {}'.format(
c.code, c_body['NeutronError']['type'],
c_body['NeutronError']['message']))
inner_error_strings.append(err)

View File

@ -157,7 +157,7 @@ class ExtensionMiddleware(base.ConfigurableMiddleware):
resource.collection)
for action, method in resource.collection_actions.items():
conditions = dict(method=[method])
path = "/%s/%s" % (resource.collection, action)
path = "/{}/{}".format(resource.collection, action)
with mapper.submapper(controller=resource.controller,
action=action,
path_prefix=path_prefix,
@ -206,7 +206,7 @@ class ExtensionMiddleware(base.ConfigurableMiddleware):
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
mapper,
singleton=False)
super(ExtensionMiddleware, self).__init__(application)
super().__init__(application)
@classmethod
def factory(cls, global_config, **local_config):
@ -282,7 +282,7 @@ def plugin_aware_extension_middleware_factory(global_config, **local_config):
return _factory
class ExtensionManager(object):
class ExtensionManager:
"""Load extensions from the configured extension path.
See tests/unit/extensions/foxinsocks.py for an
@ -487,13 +487,12 @@ class PluginAwareExtensionManager(ExtensionManager):
def __init__(self, path, plugins):
self.plugins = plugins
super(PluginAwareExtensionManager, self).__init__(path)
super().__init__(path)
self.check_if_plugin_extensions_loaded()
def _check_extension(self, extension):
"""Check if an extension is supported by any plugin."""
extension_is_valid = super(PluginAwareExtensionManager,
self)._check_extension(extension)
extension_is_valid = super()._check_extension(extension)
if not extension_is_valid:
return False
@ -572,7 +571,7 @@ class PluginAwareExtensionManager(ExtensionManager):
extensions=list(missing_aliases))
class RequestExtension(object):
class RequestExtension:
"""Extend requests and responses of core Neutron OpenStack API controllers.
Provide a way to add data to responses and handle custom request data
@ -583,10 +582,10 @@ class RequestExtension(object):
self.url_route = url_route
self.handler = handler
self.conditions = dict(method=[method])
self.key = "%s-%s" % (method, url_route)
self.key = "{}-{}".format(method, url_route)
class ActionExtension(object):
class ActionExtension:
"""Add custom actions to core Neutron OpenStack API controllers."""
def __init__(self, collection, action_name, handler):
@ -595,7 +594,7 @@ class ActionExtension(object):
self.handler = handler
class ResourceExtension(object):
class ResourceExtension:
"""Add top level resources to the OpenStack API in Neutron."""
def __init__(self, collection, controller, parent=None, path_prefix="",

View File

@ -65,7 +65,7 @@ METHOD_PRIORITY_MAP = {
LOG = logging.getLogger(__name__)
class DhcpAgentNotifyAPI(object):
class DhcpAgentNotifyAPI:
"""API for plugin to notify DHCP agent.
This class implements the client side of an rpc interface. The server side

View File

@ -35,7 +35,7 @@ LOG = logging.getLogger(__name__)
AGENT_NOTIFY_MAX_ATTEMPTS = 2
class L3AgentNotifyAPI(object):
class L3AgentNotifyAPI:
"""API for plugin to notify L3 agent."""
def __init__(self, topic=topics.L3_AGENT):

View File

@ -26,7 +26,7 @@ from neutron.db import agentschedulers_db
LOG = logging.getLogger(__name__)
class MeteringAgentNotifyAPI(object):
class MeteringAgentNotifyAPI:
"""API for plugin to notify L3 metering agent."""
def __init__(self, topic=topics.METERING_AGENT):

View File

@ -32,7 +32,7 @@ def _call_with_retry(max_attempts):
def wrapper(f):
def func_wrapper(*args, **kwargs):
# (ivasilevskaya) think of a more informative data to log
action = '%(func)s' % {'func': getattr(f, '__name__', f)}
action = '{func}'.format(func=getattr(f, '__name__', f))
for attempt in range(1, max_attempts + 1):
try:
return f(*args, **kwargs)

View File

@ -30,7 +30,7 @@ def _validate_resource_type(resource_type):
raise exceptions.Invalid(element='resource', value=resource_type)
class ResourceCallbacksManager(object, metaclass=abc.ABCMeta):
class ResourceCallbacksManager(metaclass=abc.ABCMeta):
"""A callback system that allows information providers in a loose manner.
"""
@ -39,10 +39,10 @@ class ResourceCallbacksManager(object, metaclass=abc.ABCMeta):
def __new__(cls, *args, **kwargs):
if not cls._singleton:
return super(ResourceCallbacksManager, cls).__new__(cls)
return super().__new__(cls)
if not hasattr(cls, '_instance'):
cls._instance = super(ResourceCallbacksManager, cls).__new__(cls)
cls._instance = super().__new__(cls)
return cls._instance
@abc.abstractmethod

View File

@ -44,7 +44,7 @@ AgentConsumer = collections.namedtuple('AgentConsumer', ['agent_type',
AgentConsumer.__repr__ = lambda self: '%s@%s' % self
class ResourceConsumerTracker(object):
class ResourceConsumerTracker:
"""Class passed down to collect consumer's resource versions.
This class is responsible for fetching the local versions of
@ -194,7 +194,7 @@ class ResourceConsumerTracker(object):
self._versions = versions
class CachedResourceConsumerTracker(object):
class CachedResourceConsumerTracker:
"""This class takes care of the caching logic of versions."""
def __init__(self):

View File

@ -43,7 +43,7 @@ from neutron.quota import resource_registry
LOG = logging.getLogger(__name__)
class DhcpRpcCallback(object):
class DhcpRpcCallback:
"""DHCP agent RPC callback in plugin implementations.
This class implements the server side of an rpc interface. The client

View File

@ -25,7 +25,7 @@ import oslo_messaging
LOG = logging.getLogger(__name__)
class DVRServerRpcApi(object):
class DVRServerRpcApi:
"""Agent-side RPC (stub) for agent-to-plugin interaction.
This class implements the client side of an rpc interface. The server side
@ -66,7 +66,7 @@ class DVRServerRpcApi(object):
context, 'get_subnet_for_dvr', subnet=subnet, fixed_ips=fixed_ips)
class DVRServerRpcCallback(object):
class DVRServerRpcCallback:
"""Plugin-side RPC (implementation) for agent-to-plugin interaction.
This class implements the server side of an rpc interface. The client side
@ -111,7 +111,7 @@ class DVRServerRpcCallback(object):
context, subnet, fixed_ips=fixed_ips)
class DVRAgentRpcApiMixin(object):
class DVRAgentRpcApiMixin:
"""Plugin-side RPC (stub) for plugin-to-agent interaction."""
DVR_RPC_VERSION = "1.0"
@ -130,7 +130,7 @@ class DVRAgentRpcApiMixin(object):
cctxt.cast(context, 'dvr_mac_address_update', dvr_macs=dvr_macs)
class DVRAgentRpcCallbackMixin(object):
class DVRAgentRpcCallbackMixin:
"""Agent-side RPC (implementation) for plugin-to-agent interaction."""
def dvr_mac_address_update(self, context, **kwargs):

View File

@ -31,7 +31,7 @@ from sqlalchemy import orm
LOG = logging.getLogger(__name__)
class L3RpcCallback(object):
class L3RpcCallback:
"""L3 agent RPC callback in plugin implementations."""
# 1.0 L3PluginApi BASE_RPC_API_VERSION

View File

@ -18,7 +18,7 @@ from neutron_lib.plugins import directory
import oslo_messaging
class MetadataRpcCallback(object):
class MetadataRpcCallback:
"""Metadata agent RPC callback in plugin implementations.
This class implements the server side of an rpc interface used by the

View File

@ -72,7 +72,7 @@ def resource_type_versioned_topic(resource_type, version=None):
'version': version or cls.VERSION}
class ResourcesPullRpcApi(object):
class ResourcesPullRpcApi:
"""Agent-side RPC (stub) for agent-to-plugin interaction.
This class implements the client side of an rpc interface. The server side
@ -83,7 +83,7 @@ class ResourcesPullRpcApi(object):
def __new__(cls):
# make it a singleton
if not hasattr(cls, '_instance'):
cls._instance = super(ResourcesPullRpcApi, cls).__new__(cls)
cls._instance = super().__new__(cls)
target = oslo_messaging.Target(
topic=topics.PLUGIN, version='1.1',
namespace=constants.RPC_NAMESPACE_RESOURCES)
@ -116,7 +116,7 @@ class ResourcesPullRpcApi(object):
for primitive in primitives]
class ResourcesPullRpcCallback(object):
class ResourcesPullRpcCallback:
"""Plugin-side RPC (implementation) for agent-to-plugin interaction.
This class implements the server side of an rpc interface. The client side
@ -148,7 +148,7 @@ class ResourcesPullRpcCallback(object):
**filter_kwargs)]
class ResourcesPushToServersRpcApi(object):
class ResourcesPushToServersRpcApi:
"""Publisher-side RPC (stub) for plugin-to-plugin fanout interaction.
This class implements the client side of an rpc interface. The receiver
@ -173,7 +173,7 @@ class ResourcesPushToServersRpcApi(object):
version_map=version_map)
class ResourcesPushToServerRpcCallback(object):
class ResourcesPushToServerRpcCallback:
"""Receiver-side RPC (implementation) for plugin-to-plugin interaction.
This class implements the receiver side of an rpc interface.
@ -195,7 +195,7 @@ class ResourcesPushToServerRpcCallback(object):
version_manager.update_versions(consumer_id, version_map)
class ResourcesPushRpcApi(object):
class ResourcesPushRpcApi:
"""Plugin-side RPC for plugin-to-agents interaction.
This interface is designed to push versioned object updates to interested
@ -237,7 +237,7 @@ class ResourcesPushRpcApi(object):
LOG.debug(
"Pushing event %s for resources: %s", event_type,
{t:
["ID=%s,revision_number=%s" % (
["ID={},revision_number={}".format(
getattr(obj, 'id', None),
getattr(obj, 'revision_number', None))
for obj in resources_by_type[t]]
@ -262,7 +262,7 @@ class ResourcesPushRpcApi(object):
event_type=event_type)
class ResourcesPushRpcCallback(object):
class ResourcesPushRpcCallback:
"""Agent-side RPC for plugin-to-agents interaction.
This class implements the receiver for notification about versioned objects

View File

@ -33,7 +33,7 @@ from neutron.db import securitygroups_rpc_base as sg_rpc_base
LOG = logging.getLogger(__name__)
class SecurityGroupServerRpcApi(object):
class SecurityGroupServerRpcApi:
"""RPC client for security group methods in the plugin.
This class implements the client side of an rpc interface. This interface
@ -65,7 +65,7 @@ class SecurityGroupServerRpcApi(object):
call_version=call_version)
class SecurityGroupServerRpcCallback(object):
class SecurityGroupServerRpcCallback:
"""Callback for SecurityGroup agent RPC in plugin implementations.
This class implements the server side of an rpc interface. The client side
@ -89,11 +89,11 @@ class SecurityGroupServerRpcCallback(object):
return directory.get_plugin()
def _get_devices_info(self, context, devices):
return dict(
(port['id'], port)
return {
port['id']: port
for port in self.plugin.get_ports_from_devices(context, devices)
if port and not net.is_port_trusted(port)
)
}
def security_group_rules_for_devices(self, context, **kwargs):
"""Callback method to return security group rules for each port.
@ -141,9 +141,9 @@ class SecurityGroupServerRpcCallback(object):
for sg_id in sg_ids:
member_ips = sg_member_ips.get(sg_id, {})
ipv4_ips = member_ips.get("IPv4", set())
comp_ipv4_ips = set([ip for ip, _mac in ipv4_ips])
comp_ipv4_ips = {ip for ip, _mac in ipv4_ips}
ipv6_ips = member_ips.get("IPv6", set())
comp_ipv6_ips = set([ip for ip, _mac in ipv6_ips])
comp_ipv6_ips = {ip for ip, _mac in ipv6_ips}
comp_ips = {"IPv4": comp_ipv4_ips,
"IPv6": comp_ipv6_ips}
sg_member_ips[sg_id] = comp_ips
@ -151,7 +151,7 @@ class SecurityGroupServerRpcCallback(object):
return sg_info
class SecurityGroupAgentRpcApiMixin(object):
class SecurityGroupAgentRpcApiMixin:
"""RPC client for security group methods to the agent.
This class implements the client side of an rpc interface. This interface
@ -193,7 +193,7 @@ class SecurityGroupAgentRpcApiMixin(object):
security_groups=security_groups)
class SecurityGroupAgentRpcCallbackMixin(object):
class SecurityGroupAgentRpcCallbackMixin:
"""A mix-in that enable SecurityGroup support in agent implementations.
This class implements the server side of an rpc interface. The client side
@ -281,8 +281,8 @@ class SecurityGroupServerAPIShim(sg_rpc_base.SecurityGroupInfoAPIMixin):
def get_secgroup_ids_for_address_group(self, address_group_id):
filters = {'remote_address_group_id': (address_group_id, )}
return set([rule.security_group_id for rule in
self.rcache.get_resources('SecurityGroupRule', filters)])
return {rule.security_group_id for rule in
self.rcache.get_resources('SecurityGroupRule', filters)}
def _add_child_sg_rules(self, rtype, event, trigger, payload):
# whenever we receive a full security group, add all child rules
@ -420,8 +420,8 @@ class SecurityGroupServerAPIShim(sg_rpc_base.SecurityGroupInfoAPIMixin):
if not ports:
return []
results = []
sg_ids = set((sg_id for p in ports.values()
for sg_id in p['security_group_ids']))
sg_ids = {sg_id for p in ports.values()
for sg_id in p['security_group_ids']}
rules_by_sgid = collections.defaultdict(list)
for sg_id in sg_ids:
filters = {'security_group_id': (sg_id, )}
@ -434,8 +434,8 @@ class SecurityGroupServerAPIShim(sg_rpc_base.SecurityGroupInfoAPIMixin):
return results
def _select_sg_ids_for_ports(self, context, ports):
sg_ids = set((sg_id for p in ports.values()
for sg_id in p['security_group_ids']))
sg_ids = {sg_id for p in ports.values()
for sg_id in p['security_group_ids']}
return [(sg_id, ) for sg_id in sg_ids]
def _get_sgs_stateful_flag(self, context, sg_ids):

View File

@ -43,7 +43,7 @@ from neutron.quota import resource_registry
LOG = logging.getLogger(__name__)
class Controller(object):
class Controller:
LIST = 'list'
SHOW = 'show'
CREATE = 'create'
@ -129,12 +129,12 @@ class Controller(object):
self._parent_id_name = None
parent_part = ''
self._plugin_handlers = {
self.LIST: 'get%s_%s' % (parent_part, self._collection),
self.SHOW: 'get%s_%s' % (parent_part, self._resource)
self.LIST: 'get{}_{}'.format(parent_part, self._collection),
self.SHOW: 'get{}_{}'.format(parent_part, self._resource)
}
for action in [self.CREATE, self.UPDATE, self.DELETE]:
self._plugin_handlers[action] = '%s%s_%s' % (action, parent_part,
self._resource)
self._plugin_handlers[action] = '{}{}_{}'.format(
action, parent_part, self._resource)
def _get_primary_key(self, default_primary_key='id'):
for key, value in self._attr_info.items():
@ -176,8 +176,8 @@ class Controller(object):
if attr_data and attr_data['is_visible']:
if policy.check(
context,
'%s:%s' % (self._plugin_handlers[self.SHOW],
attr_name),
'{}:{}'.format(self._plugin_handlers[self.SHOW],
attr_name),
data,
might_not_exist=True,
pluralized=self._collection):
@ -824,13 +824,14 @@ class Controller(object):
self.parent['member_name'] in
service_const.EXT_PARENT_RESOURCE_MAPPING):
resource_item.setdefault(
"%s_%s" % (constants.EXT_PARENT_PREFIX,
self._parent_id_name),
"{}_{}".format(constants.EXT_PARENT_PREFIX,
self._parent_id_name),
parent_id)
# If this func is called by create/update/delete, we just add.
else:
resource_item.setdefault(
"%s_%s" % (constants.EXT_PARENT_PREFIX, self._parent_id_name),
"{}_{}".format(constants.EXT_PARENT_PREFIX,
self._parent_id_name),
parent_id)
def _belongs_to_default_sg(self, request, resource_item):

View File

@ -23,7 +23,7 @@ def get_view_builder(req):
return ViewBuilder(base_url)
class ViewBuilder(object):
class ViewBuilder:
def __init__(self, base_url):
"""Object initialization.

View File

@ -61,8 +61,8 @@ class WorkerService(neutron_worker.NeutronBaseWorker):
"""Wraps a worker to be handled by ProcessLauncher"""
def __init__(self, service, application, set_proctitle, disable_ssl=False,
worker_process_count=0, desc=None):
super(WorkerService, self).__init__(worker_process_count,
set_proctitle)
super().__init__(worker_process_count,
set_proctitle)
self._service = service
self._application = application
@ -72,7 +72,7 @@ class WorkerService(neutron_worker.NeutronBaseWorker):
def start(self, desc=None):
desc = desc or self.desc
super(WorkerService, self).start(desc=desc)
super().start(desc=desc)
# When api worker is stopped it kills the eventlet wsgi server which
# internally closes the wsgi server socket object. This server socket
# object becomes not usable which leads to "Bad file descriptor"
@ -99,7 +99,7 @@ class WorkerService(neutron_worker.NeutronBaseWorker):
config.reset_service()
class Server(object):
class Server:
"""Server class to manage multiple WSGI sockets and applications."""
def __init__(self, name, num_threads=None, disable_ssl=False):
@ -141,7 +141,7 @@ class Server(object):
sock = eventlet.listen(bind_addr,
backlog=backlog,
family=family)
except socket.error as err:
except OSError as err:
with excutils.save_and_reraise_exception() as ctxt:
if err.errno == errno.EADDRINUSE:
ctxt.reraise = False
@ -250,7 +250,7 @@ class Request(wsgi.Request):
if len(parts) > 1:
_format = parts[1]
if _format in ['json']:
return 'application/{0}'.format(_format)
return f'application/{_format}'
# Then look up content header
type_from_header = self.get_content_type()
@ -296,7 +296,7 @@ class Request(wsgi.Request):
return self.environ['neutron.context']
class ActionDispatcher(object):
class ActionDispatcher:
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
@ -338,7 +338,7 @@ class ResponseHeaderSerializer(ActionDispatcher):
response.status_int = 200
class ResponseSerializer(object):
class ResponseSerializer:
"""Encode the necessary pieces into a response object."""
def __init__(self, body_serializers=None, headers_serializer=None):
@ -411,7 +411,7 @@ class RequestHeadersDeserializer(ActionDispatcher):
return {}
class RequestDeserializer(object):
class RequestDeserializer:
"""Break up a Request object into more useful pieces."""
def __init__(self, body_deserializers=None, headers_deserializer=None):
@ -498,7 +498,7 @@ class RequestDeserializer(object):
return args
class Application(object):
class Application:
"""Base WSGI application wrapper. Subclasses need to implement __call__."""
@classmethod
@ -690,7 +690,7 @@ class Fault(webob.exc.HTTPException):
# NOTE(salvatore-orlando): this class will go once the
# extension API framework is updated
class Controller(object):
class Controller:
"""WSGI app that dispatched to methods.
WSGI app that reads routing information supplied by RoutesMiddleware
@ -763,7 +763,7 @@ class Controller(object):
# NOTE(salvatore-orlando): this class will go once the
# extension API framework is updated
class Serializer(object):
class Serializer:
"""Serializes and deserializes dictionaries to certain MIME types."""
def __init__(self, metadata=None):

View File

@ -40,7 +40,7 @@ def get_patch_port_names(bridge_name):
return int_if_name, phys_if_name
class PatchPortCleaner(object):
class PatchPortCleaner:
def __init__(self, config):
LOG.debug("Get OVS bridge mappings")
mappings = helpers.parse_mappings(config.OVS.bridge_mappings)

View File

@ -17,7 +17,7 @@ from neutron_lib import constants
def main():
proctitle = "%s (%s)" % (
proctitle = "{} ({})".format(
constants.AGENT_PROCESS_DHCP, setproctitle.getproctitle())
setproctitle.setproctitle(proctitle)

View File

@ -17,7 +17,7 @@ from neutron_lib import constants
def main():
proctitle = "%s (%s)" % (
proctitle = "{} ({})".format(
constants.AGENT_PROCESS_L3, setproctitle.getproctitle())
setproctitle.setproctitle(proctitle)

View File

@ -17,7 +17,7 @@ from neutron_lib import constants
def main():
proctitle = "%s (%s)" % (
proctitle = "{} ({})".format(
constants.AGENT_PROCESS_METADATA, setproctitle.getproctitle())
setproctitle.setproctitle(proctitle)

View File

@ -17,7 +17,7 @@ from neutron_lib import constants
def main():
proctitle = "%s (%s)" % (
proctitle = "{} ({})".format(
constants.AGENT_PROCESS_OVN_METADATA, setproctitle.getproctitle())
setproctitle.setproctitle(proctitle)

View File

@ -20,7 +20,7 @@ AGENT_PROCESS_OVN_NEUTRON_AGENT = 'neutron-ovn-agent'
def main():
proctitle = "%s (%s)" % (AGENT_PROCESS_OVN_NEUTRON_AGENT,
setproctitle.getproctitle())
proctitle = "{} ({})".format(AGENT_PROCESS_OVN_NEUTRON_AGENT,
setproctitle.getproctitle())
setproctitle.setproctitle(proctitle)
ovn_neutron_agent.main()

View File

@ -21,7 +21,7 @@ from neutron_lib import constants
def main():
proctitle = "%s (%s)" % (
proctitle = "{} ({})".format(
constants.AGENT_PROCESS_LINUXBRIDGE, setproctitle.getproctitle())
setproctitle.setproctitle(proctitle)

Some files were not shown because too many files have changed in this diff Show More