diff --git a/neutron/agent/agent_extensions_manager.py b/neutron/agent/agent_extensions_manager.py index a4758da782d..f33774a478c 100644 --- a/neutron/agent/agent_extensions_manager.py +++ b/neutron/agent/agent_extensions_manager.py @@ -24,7 +24,7 @@ class AgentExtensionsManager(stevedore.named.NamedExtensionManager): """Manage agent extensions.""" def __init__(self, conf, namespace): - super(AgentExtensionsManager, self).__init__( + super().__init__( namespace, conf.agent.extensions, invoke_on_load=True, name_order=True) LOG.info("Loaded agent extensions: %s", self.names()) diff --git a/neutron/agent/common/async_process.py b/neutron/agent/common/async_process.py index 96277d0ba53..c527cf42943 100644 --- a/neutron/agent/common/async_process.py +++ b/neutron/agent/common/async_process.py @@ -34,7 +34,7 @@ class AsyncProcessException(Exception): pass -class AsyncProcess(object): +class AsyncProcess: """Manages an asynchronous process. This class spawns a new process via subprocess and uses diff --git a/neutron/agent/common/base_agent_rpc.py b/neutron/agent/common/base_agent_rpc.py index 95b336ed683..b5a2004c7f7 100644 --- a/neutron/agent/common/base_agent_rpc.py +++ b/neutron/agent/common/base_agent_rpc.py @@ -17,7 +17,7 @@ from neutron_lib import rpc as n_rpc from oslo_messaging import Target -class BasePluginApi(object): +class BasePluginApi: """Base agent side of the rpc API""" def __init__(self, topic, namespace, version): target = Target( diff --git a/neutron/agent/common/base_polling.py b/neutron/agent/common/base_polling.py index d654522e9a9..e1b43e41ff1 100644 --- a/neutron/agent/common/base_polling.py +++ b/neutron/agent/common/base_polling.py @@ -14,7 +14,7 @@ # under the License. -class BasePollingManager(object): +class BasePollingManager: def __init__(self): self._force_polling = False diff --git a/neutron/agent/common/ovs_lib.py b/neutron/agent/common/ovs_lib.py index bcbc54ae42f..579a4c7db52 100644 --- a/neutron/agent/common/ovs_lib.py +++ b/neutron/agent/common/ovs_lib.py @@ -118,7 +118,7 @@ def get_gre_tunnel_port_type(remote_ip, local_ip): return p_const.TYPE_GRE -class VifPort(object): +class VifPort: def __init__(self, port_name, ofport, vif_id, vif_mac, switch): self.port_name = port_name self.ofport = ofport @@ -134,7 +134,7 @@ class VifPort(object): self.switch.br_name) -class BaseOVS(object): +class BaseOVS: def __init__(self): self.ovsdb = impl_idl.api_factory() @@ -247,7 +247,7 @@ def version_from_protocol(protocol): class OVSBridge(BaseOVS): def __init__(self, br_name, datapath_type=ovs_constants.OVS_DATAPATH_SYSTEM): - super(OVSBridge, self).__init__() + super().__init__() self.br_name = br_name self.datapath_type = datapath_type self._default_cookie = generate_random_cookie() @@ -336,7 +336,7 @@ class OVSBridge(BaseOVS): # IGMP Neutron configs are more value consistent using True to # enable a feature and False to disable it. flood_value = ('false' if - cfg.CONF.OVS.igmp_flood_unregistered else 'true') + cfg.CONF.OVS.igmp_flood_unregistered else 'true') other_config = { 'mcast-snooping-disable-flood-unregistered': flood_value} with self.ovsdb.transaction() as txn: @@ -1317,7 +1317,7 @@ class OVSBridge(BaseOVS): self.destroy() -class DeferredOVSBridge(object): +class DeferredOVSBridge: '''Deferred OVSBridge. 
This class wraps add_flow, mod_flow and delete_flows calls to an OVSBridge @@ -1346,7 +1346,7 @@ class DeferredOVSBridge(object): self.full_ordered = full_ordered self.order = order if not self.full_ordered: - self.weights = dict((y, x) for x, y in enumerate(self.order)) + self.weights = {y: x for x, y in enumerate(self.order)} self.action_flow_tuples = [] self.use_bundle = use_bundle @@ -1422,7 +1422,7 @@ def _build_flow_expr_str(flow_dict, cmd, strict): if key == 'proto': flow_expr_arr.append(value) else: - flow_expr_arr.append("%s=%s" % (key, str(value))) + flow_expr_arr.append("{}={}".format(key, str(value))) if actions: flow_expr_arr.append(actions) diff --git a/neutron/agent/common/ovsdb_monitor.py b/neutron/agent/common/ovsdb_monitor.py index 49e7cee4971..2d9d96666d2 100644 --- a/neutron/agent/common/ovsdb_monitor.py +++ b/neutron/agent/common/ovsdb_monitor.py @@ -50,10 +50,10 @@ class OvsdbMonitor(async_process.AsyncProcess): cmd.append(','.join(columns)) if format: cmd.append('--format=%s' % format) - super(OvsdbMonitor, self).__init__(cmd, run_as_root=run_as_root, - respawn_interval=respawn_interval, - log_output=True, - die_on_error=False) + super().__init__(cmd, run_as_root=run_as_root, + respawn_interval=respawn_interval, + log_output=True, + die_on_error=False) self.new_events = {'added': [], 'removed': [], 'modified': []} def get_events(self): @@ -63,7 +63,7 @@ class OvsdbMonitor(async_process.AsyncProcess): return events def start(self, block=False, timeout=60): - super(OvsdbMonitor, self).start() + super().start() if block: utils.wait_until_true(self.is_active, timeout=timeout) @@ -80,7 +80,7 @@ class SimpleInterfaceMonitor(OvsdbMonitor): bridge_names=None, ovs=None): self._bridge_names = bridge_names or [] self._ovs = ovs - super(SimpleInterfaceMonitor, self).__init__( + super().__init__( 'Interface', columns=['name', 'ofport', 'external_ids'], format='json', diff --git a/neutron/agent/common/placement_report.py b/neutron/agent/common/placement_report.py index dab95e91089..eaccc320b4b 100644 --- a/neutron/agent/common/placement_report.py +++ b/neutron/agent/common/placement_report.py @@ -24,7 +24,7 @@ from neutron.common import _constants as n_const LOG = logging.getLogger(__name__) -class DeferredCall(object): +class DeferredCall: '''Store a callable for later calling. This is hardly more than a parameterless lambda, but this way it's much @@ -37,17 +37,17 @@ class DeferredCall(object): self.kwargs = kwargs def __str__(self): - return '%s(%s)' % ( + return '{}({})'.format( self.func.__name__, ', '.join([repr(x) for x in self.args] + - ['%s=%s' % (k, repr(v)) + ['{}={}'.format(k, repr(v)) for k, v in self.kwargs.items()])) def execute(self): return self.func(*self.args, **self.kwargs) -class PlacementState(object): +class PlacementState: '''Represents the desired state of the Placement DB. This represents the state of one Neutron agent @@ -143,7 +143,8 @@ class PlacementState(object): # we must create an agent RP under each hypervisor RP. 
rps = [] for hypervisor in self._hypervisor_rps.values(): - agent_rp_name = '%s:%s' % (hypervisor['name'], self._agent_type) + agent_rp_name = '{}:{}'.format( + hypervisor['name'], self._agent_type) agent_rp_uuid = place_utils.agent_resource_provider_uuid( self._driver_uuid_namespace, hypervisor['name']) rps.append( @@ -159,7 +160,7 @@ class PlacementState(object): rps = [] for device in self._rp_bandwidths: hypervisor = self._hypervisor_rps[device] - rp_name = '%s:%s:%s' % ( + rp_name = '{}:{}:{}'.format( hypervisor['name'], self._agent_type, device) rp_uuid = place_utils.device_resource_provider_uuid( self._driver_uuid_namespace, @@ -192,7 +193,7 @@ class PlacementState(object): # Remove hypervisor duplicates to avoid calling placement API multiple # times for the same hypervisor. - hypervisors = set(h['name'] for h in self._hypervisor_rps.values()) + hypervisors = {h['name'] for h in self._hypervisor_rps.values()} for hypervisor in hypervisors: agent_rp_uuid = place_utils.agent_resource_provider_uuid( self._driver_uuid_namespace, hypervisor) diff --git a/neutron/agent/common/polling.py b/neutron/agent/common/polling.py index 85e9731fe15..8aeeea39e31 100644 --- a/neutron/agent/common/polling.py +++ b/neutron/agent/common/polling.py @@ -54,7 +54,7 @@ class InterfacePollingMinimizer(base_polling.BasePollingManager): ovsdb_monitor_respawn_interval=ovs_const.DEFAULT_OVSDBMON_RESPAWN, bridge_names=None, ovs=None): - super(InterfacePollingMinimizer, self).__init__() + super().__init__() self._monitor = ovsdb_monitor.SimpleInterfaceMonitor( respawn_interval=ovsdb_monitor_respawn_interval, ovsdb_connection=cfg.CONF.OVS.ovsdb_connection, diff --git a/neutron/agent/common/resource_processing_queue.py b/neutron/agent/common/resource_processing_queue.py index 444f03ee82e..ba1010d22a7 100644 --- a/neutron/agent/common/resource_processing_queue.py +++ b/neutron/agent/common/resource_processing_queue.py @@ -21,7 +21,7 @@ from oslo_utils import timeutils from oslo_utils import uuidutils -class ResourceUpdate(object): +class ResourceUpdate: """Encapsulates a resource update An instance of this object carries the information necessary to prioritize @@ -79,7 +79,7 @@ class ResourceUpdate(object): return self.tries < 0 -class ExclusiveResourceProcessor(object): +class ExclusiveResourceProcessor: """Manager for access to a resource for processing This class controls access to a resource in a non-blocking way. 
The first @@ -159,7 +159,7 @@ class ExclusiveResourceProcessor(object): yield update -class ResourceProcessingQueue(object): +class ResourceProcessingQueue: """Manager of the queue of resources to process.""" def __init__(self): self._queue = queue.PriorityQueue() diff --git a/neutron/agent/dhcp/agent.py b/neutron/agent/dhcp/agent.py index d696408c34d..ed96b14878b 100644 --- a/neutron/agent/dhcp/agent.py +++ b/neutron/agent/dhcp/agent.py @@ -83,10 +83,10 @@ class DHCPResourceUpdate(queue.ResourceUpdate): def __lt__(self, other): if other.obj_type == self.obj_type == 'port': - self_ips = set(str(fixed_ip['ip_address']) for - fixed_ip in self.resource['fixed_ips']) - other_ips = set(str(fixed_ip['ip_address']) for - fixed_ip in other.resource['fixed_ips']) + self_ips = {str(fixed_ip['ip_address']) for + fixed_ip in self.resource['fixed_ips']} + other_ips = {str(fixed_ip['ip_address']) for + fixed_ip in other.resource['fixed_ips']} if self_ips & other_ips: return self.timestamp < other.timestamp @@ -105,7 +105,7 @@ class DhcpAgent(manager.Manager): target = oslo_messaging.Target(version='1.0') def __init__(self, host=None, conf=None): - super(DhcpAgent, self).__init__(host=host) + super().__init__(host=host) self.needs_resync_reasons = collections.defaultdict(list) self.dhcp_ready_ports = set() self.dhcp_prio_ready_ports = set() @@ -306,7 +306,7 @@ class DhcpAgent(manager.Manager): active_networks = self.plugin_rpc.get_active_networks_info( enable_dhcp_filter=False) LOG.info('All active networks have been fetched through RPC.') - active_network_ids = set(network.id for network in active_networks) + active_network_ids = {network.id for network in active_networks} for deleted_id in known_network_ids - active_network_ids: try: self.disable_dhcp_helper(deleted_id) @@ -919,7 +919,7 @@ class DhcpPluginApi(base_agent_rpc.BasePluginApi): return [dhcp.NetModel(net) for net in nets] -class NetworkCache(object): +class NetworkCache: """Agent cache of the current network state.""" def __init__(self): self.cache = {} @@ -1050,7 +1050,7 @@ class NetworkCache(object): class DhcpAgentWithStateReport(DhcpAgent): def __init__(self, host=None, conf=None): - super(DhcpAgentWithStateReport, self).__init__(host=host, conf=conf) + super().__init__(host=host, conf=conf) self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS) self.failed_report_state = False self.agent_state = { diff --git a/neutron/agent/firewall.py b/neutron/agent/firewall.py index 62eee300e79..ebf789bd6ec 100644 --- a/neutron/agent/firewall.py +++ b/neutron/agent/firewall.py @@ -50,7 +50,7 @@ def load_firewall_driver_class(driver): 'neutron.agent.firewall_drivers', driver) -class FirewallDriver(object, metaclass=abc.ABCMeta): +class FirewallDriver(metaclass=abc.ABCMeta): """Firewall Driver base class. 
Defines methods that any driver providing security groups diff --git a/neutron/agent/l2/extensions/dhcp/base.py b/neutron/agent/l2/extensions/dhcp/base.py index d0decd3ba53..7b7d7d86c90 100644 --- a/neutron/agent/l2/extensions/dhcp/base.py +++ b/neutron/agent/l2/extensions/dhcp/base.py @@ -41,7 +41,7 @@ IPV6_STR = "v6" class DHCPResponderBase(base_oskenapp.BaseNeutronAgentOSKenApp): def __init__(self, agent_api, ext_api, *args, version=IPV4_STR, **kwargs): - super(DHCPResponderBase, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.agent_api = agent_api self.int_br = self.agent_api.request_int_br() self.ext_api = ext_api diff --git a/neutron/agent/l2/extensions/dhcp/extension.py b/neutron/agent/l2/extensions/dhcp/extension.py index df57710dcbe..14ba3343b38 100644 --- a/neutron/agent/l2/extensions/dhcp/extension.py +++ b/neutron/agent/l2/extensions/dhcp/extension.py @@ -28,7 +28,7 @@ from neutron.api.rpc.callbacks import resources LOG = logging.getLogger(__name__) -class DHCPExtensionPortInfoAPI(object): +class DHCPExtensionPortInfoAPI: def __init__(self, cache_api): self.cache_api = cache_api diff --git a/neutron/agent/l2/extensions/dhcp/ipv4.py b/neutron/agent/l2/extensions/dhcp/ipv4.py index 6fea7ed01f5..aeb7a20a81f 100644 --- a/neutron/agent/l2/extensions/dhcp/ipv4.py +++ b/neutron/agent/l2/extensions/dhcp/ipv4.py @@ -152,7 +152,7 @@ class DHCPIPv4Responder(dhcp_base.DHCPResponderBase): if is_ack: fqdn = 'host-%s' % ip_addr.replace('.', '-').replace(':', '-') if cfg.CONF.dns_domain: - fqdn = '%s.%s' % (fqdn, cfg.CONF.dns_domain) + fqdn = '{}.{}'.format(fqdn, cfg.CONF.dns_domain) domain_name_bin = struct.pack('!%ds' % len(fqdn), bytes(str(fqdn).encode())) options.option_list.append( diff --git a/neutron/agent/l2/extensions/dhcp/ipv6.py b/neutron/agent/l2/extensions/dhcp/ipv6.py index 021a4c6c65a..8078db226af 100644 --- a/neutron/agent/l2/extensions/dhcp/ipv6.py +++ b/neutron/agent/l2/extensions/dhcp/ipv6.py @@ -58,9 +58,9 @@ DHCPV6_OPTION_FQDN = 39 class DHCPIPv6Responder(dhcp_base.DHCPResponderBase): def __init__(self, agent_api, ext_api, *args, **kwargs): - super(DHCPIPv6Responder, self).__init__(agent_api, ext_api, - version=dhcp_base.IPV6_STR, - *args, **kwargs) + super().__init__(agent_api, ext_api, + version=dhcp_base.IPV6_STR, + *args, **kwargs) def _create_duid(self, mac): """Create a DUID based on the mac address and time. @@ -221,7 +221,7 @@ class DHCPIPv6Responder(dhcp_base.DHCPResponderBase): # 39: Fully Qualified Domain Name fqdn = 'host-%s' % ip_addr.replace('.', '-').replace(':', '-') if req_type == 'REQUEST' and cfg.CONF.dns_domain: - fqdn = '%s.%s' % (fqdn, cfg.CONF.dns_domain) + fqdn = '{}.{}'.format(fqdn, cfg.CONF.dns_domain) # 0000 0... = Reserved: 0x00 # .... .0.. = N bit: Server should perform DNS updates diff --git a/neutron/agent/l2/extensions/fdb_population.py b/neutron/agent/l2/extensions/fdb_population.py index b532f7db254..032f5e85bb6 100644 --- a/neutron/agent/l2/extensions/fdb_population.py +++ b/neutron/agent/l2/extensions/fdb_population.py @@ -60,7 +60,7 @@ class FdbPopulationAgentExtension( constants.DEVICE_OWNER_ROUTER_INTF, constants.DEVICE_OWNER_DHCP} - class FdbTableTracker(object): + class FdbTableTracker: """FDB table tracker is a helper class intended to keep track of the existing FDB rules. 
""" diff --git a/neutron/agent/l2/extensions/local_ip.py b/neutron/agent/l2/extensions/local_ip.py index 66a08910c63..7bcc6a8248c 100644 --- a/neutron/agent/l2/extensions/local_ip.py +++ b/neutron/agent/l2/extensions/local_ip.py @@ -199,7 +199,7 @@ class LocalIPAgentExtension(l2_extension.L2AgentExtension): priority=10, nw_dst=local_ip, reg6=vlan, - dl_type="0x{:04x}".format(ether_types.ETH_TYPE_IP), + dl_type=f"0x{ether_types.ETH_TYPE_IP:04x}", actions='mod_dl_dst:{:s},' 'ct(commit,table={:d},zone={:d},nat(dst={:s}))'.format( mac, ovs_constants.TRANSIENT_TABLE, vlan, dest_ip) @@ -211,7 +211,7 @@ class LocalIPAgentExtension(l2_extension.L2AgentExtension): nw_src=dest_ip, reg6=vlan, ct_state="-trk", - dl_type="0x{:04x}".format(ether_types.ETH_TYPE_IP), + dl_type=f"0x{ether_types.ETH_TYPE_IP:04x}", actions='ct(table={:d},zone={:d},nat'.format( ovs_constants.TRANSIENT_TABLE, vlan) ) @@ -225,8 +225,8 @@ class LocalIPAgentExtension(l2_extension.L2AgentExtension): nw_src=dest_ip, nw_dst=local_ip, reg6=vlan, - dl_type="0x{:04x}".format(ether_types.ETH_TYPE_IP), - actions='resubmit(,{:d})'.format(ovs_constants.TRANSIENT_TABLE) + dl_type=f"0x{ether_types.ETH_TYPE_IP:04x}", + actions=f'resubmit(,{ovs_constants.TRANSIENT_TABLE:d})' ) def delete_local_ip_translation(self, vlan, local_ip, dest_ip, mac): diff --git a/neutron/agent/l2/extensions/metadata/host_metadata_proxy.py b/neutron/agent/l2/extensions/metadata/host_metadata_proxy.py index aa89782b85b..2e8fa804a67 100644 --- a/neutron/agent/l2/extensions/metadata/host_metadata_proxy.py +++ b/neutron/agent/l2/extensions/metadata/host_metadata_proxy.py @@ -89,7 +89,7 @@ backend backend_{{ instance.uuid }}_{{ instance.provider_ip }} """) -class ProxyInstance(object): +class ProxyInstance: def __init__(self, instance_id, provider_ip, project_id): self.uuid = instance_id self.provider_ip = provider_ip @@ -98,7 +98,7 @@ class ProxyInstance(object): cfg.CONF.METADATA, self.uuid) -class HostMedataHAProxyDaemonMonitor(object): +class HostMedataHAProxyDaemonMonitor: """Manage the data and state of a host metadata haproxy process.""" def __init__(self, process_monitor, uuid=None, @@ -114,7 +114,7 @@ class HostMedataHAProxyDaemonMonitor(object): cfg.CONF.state_path, self._host_id, 'haproxy.conf', True) buf = io.StringIO() - meta_api = "%s:%s" % ( + meta_api = "{}:{}".format( cfg.CONF.METADATA.nova_metadata_host, cfg.CONF.METADATA.nova_metadata_port) @@ -138,7 +138,7 @@ class HostMedataHAProxyDaemonMonitor(object): buf.write('%s' % _HOST_PATH_PROXY_TEMPLATE.render( log_level='debug', - log_tag="%s-%s" % (PROXY_SERVICE_NAME, self._host_id), + log_tag="{}-{}".format(PROXY_SERVICE_NAME, self._host_id), user=username, group=groupname, maxconn=1024, diff --git a/neutron/agent/l2/extensions/qos.py b/neutron/agent/l2/extensions/qos.py index 11a18583bea..215c405d7e9 100644 --- a/neutron/agent/l2/extensions/qos.py +++ b/neutron/agent/l2/extensions/qos.py @@ -31,7 +31,7 @@ from neutron import manager LOG = logging.getLogger(__name__) -class QosAgentDriver(object, metaclass=abc.ABCMeta): +class QosAgentDriver(metaclass=abc.ABCMeta): """Defines stable abstract interface for QoS Agent Driver. 
QoS Agent driver defines the interface to be implemented by Agent @@ -111,8 +111,8 @@ class QosAgentDriver(object, metaclass=abc.ABCMeta): def _handle_rule_delete(self, port, rule_type, ingress=False): handler_name = "".join(("delete_", rule_type)) if ingress: - handler_name = "%s_%s" % (handler_name, - constants.INGRESS_DIRECTION) + handler_name = "{}_{}".format(handler_name, + constants.INGRESS_DIRECTION) handler = getattr(self, handler_name) handler(port) @@ -138,7 +138,7 @@ class QosAgentDriver(object, metaclass=abc.ABCMeta): return rule_direction == constants.INGRESS_DIRECTION -class PortPolicyMap(object): +class PortPolicyMap: def __init__(self): # we cannot use a dict of sets here because port dicts are not hashable self.qos_policy_ports = collections.defaultdict(dict) diff --git a/neutron/agent/l2/l2_agent_extensions_manager.py b/neutron/agent/l2/l2_agent_extensions_manager.py index 226f2999208..776abdd3cc4 100644 --- a/neutron/agent/l2/l2_agent_extensions_manager.py +++ b/neutron/agent/l2/l2_agent_extensions_manager.py @@ -32,8 +32,7 @@ class L2AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager): """ def __init__(self, conf): - super(L2AgentExtensionsManager, - self).__init__(conf, L2_AGENT_EXT_MANAGER_NAMESPACE) + super().__init__(conf, L2_AGENT_EXT_MANAGER_NAMESPACE) def handle_port(self, context, data): """Notify all agent extensions to handle port.""" diff --git a/neutron/agent/l3/agent.py b/neutron/agent/l3/agent.py index a8fb4f6b009..116df1aca93 100644 --- a/neutron/agent/l3/agent.py +++ b/neutron/agent/l3/agent.py @@ -94,7 +94,7 @@ def log_verbose_exc(message, router_payload): router_payload, indent=5)) -class L3PluginApi(object): +class L3PluginApi: """Agent side of the l3 agent RPC API. API version history: @@ -213,7 +213,7 @@ class L3PluginApi(object): context, 'get_networks', filters=filters, fields=fields) -class RouterFactory(object): +class RouterFactory: def __init__(self): self._routers = {} @@ -330,7 +330,7 @@ class L3NATAgent(ha.AgentMixin, self._pool_size = ROUTER_PROCESS_GREENLET_MIN self._pool = eventlet.GreenPool(size=self._pool_size) self._queue = queue.ResourceProcessingQueue() - super(L3NATAgent, self).__init__(host=self.conf.host) + super().__init__(host=self.conf.host) self.target_ex_net_id = None self.use_ipv6 = netutils.is_ipv6_enabled() @@ -999,7 +999,7 @@ class L3NATAgent(ha.AgentMixin, class L3NATAgentWithStateReport(L3NATAgent): def __init__(self, host, conf=None): - super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf) + super().__init__(host=host, conf=conf) self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS) self.failed_report_state = False self.agent_state = { diff --git a/neutron/agent/l3/dvr.py b/neutron/agent/l3/dvr.py index f267cf739f3..9fbd9fb7334 100644 --- a/neutron/agent/l3/dvr.py +++ b/neutron/agent/l3/dvr.py @@ -17,11 +17,11 @@ import weakref from neutron.agent.l3 import dvr_fip_ns -class AgentMixin(object): +class AgentMixin: def __init__(self, host): # dvr data self._fip_namespaces = weakref.WeakValueDictionary() - super(AgentMixin, self).__init__(host) + super().__init__(host) def get_fip_ns(self, ext_net_id): # TODO(Carl) is this necessary? 
Code that this replaced was careful to diff --git a/neutron/agent/l3/dvr_edge_ha_router.py b/neutron/agent/l3/dvr_edge_ha_router.py index 85344fd797b..ade4e4cf4ca 100644 --- a/neutron/agent/l3/dvr_edge_ha_router.py +++ b/neutron/agent/l3/dvr_edge_ha_router.py @@ -31,8 +31,8 @@ class DvrEdgeHaRouter(dvr_edge_router.DvrEdgeRouter, """ def __init__(self, host, *args, **kwargs): - super(DvrEdgeHaRouter, self).__init__(host, - *args, **kwargs) + super().__init__(host, + *args, **kwargs) self.enable_snat = None @property @@ -85,7 +85,7 @@ class DvrEdgeHaRouter(dvr_edge_router.DvrEdgeRouter, self.set_ha_port() if (self.is_router_primary() and self.ha_port and self.ha_port['status'] == constants.PORT_STATUS_ACTIVE): - return super(DvrEdgeHaRouter, self).add_centralized_floatingip( + return super().add_centralized_floatingip( fip, fip_cidr) else: return constants.FLOATINGIP_STATUS_ACTIVE @@ -93,7 +93,7 @@ class DvrEdgeHaRouter(dvr_edge_router.DvrEdgeRouter, def remove_centralized_floatingip(self, fip_cidr): self._remove_vip(fip_cidr) if self.is_router_primary(): - super(DvrEdgeHaRouter, self).remove_centralized_floatingip( + super().remove_centralized_floatingip( fip_cidr) def get_centralized_fip_cidr_set(self): @@ -105,7 +105,7 @@ class DvrEdgeHaRouter(dvr_edge_router.DvrEdgeRouter, return set(self._get_cidrs_from_keepalived(interface_name)) def external_gateway_added(self, ex_gw_port, interface_name): - super(DvrEdgeHaRouter, self).external_gateway_added( + super().external_gateway_added( ex_gw_port, interface_name) for port in self.get_snat_interfaces(): snat_interface_name = self._get_snat_int_device_name(port['id']) @@ -124,7 +124,7 @@ class DvrEdgeHaRouter(dvr_edge_router.DvrEdgeRouter, namespace=self.ha_namespace, prefix=constants.SNAT_INT_DEV_PREFIX) self._clear_vips(snat_interface) - super(DvrEdgeHaRouter, self)._external_gateway_removed( + super()._external_gateway_removed( ex_gw_port, interface_name) self._clear_vips(interface_name) @@ -140,7 +140,7 @@ class DvrEdgeHaRouter(dvr_edge_router.DvrEdgeRouter, return self.agent_conf.agent_mode == constants.L3_AGENT_MODE_DVR_SNAT def _dvr_internal_network_removed(self, port): - super(DvrEdgeHaRouter, self)._dvr_internal_network_removed(port) + super()._dvr_internal_network_removed(port) sn_port = self.get_snat_port_for_internal_port(port, self.snat_ports) if not sn_port: return diff --git a/neutron/agent/l3/dvr_edge_router.py b/neutron/agent/l3/dvr_edge_router.py index a335e8b3c2f..e8fbe968dae 100644 --- a/neutron/agent/l3/dvr_edge_router.py +++ b/neutron/agent/l3/dvr_edge_router.py @@ -28,7 +28,7 @@ LOG = logging.getLogger(__name__) class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): def __init__(self, host, *args, **kwargs): - super(DvrEdgeRouter, self).__init__(host, *args, **kwargs) + super().__init__(host, *args, **kwargs) self.snat_namespace = dvr_snat_ns.SnatNamespace( self.router_id, self.agent_conf, self.driver, self.use_ipv6) self.snat_iptables_manager = None @@ -37,7 +37,7 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): return self.snat_namespace.name def external_gateway_added(self, ex_gw_port, interface_name): - super(DvrEdgeRouter, self).external_gateway_added( + super().external_gateway_added( ex_gw_port, interface_name) if self._is_this_snat_host(): self._create_dvr_gateway(ex_gw_port, interface_name) @@ -89,8 +89,8 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): preserve_ips) def _external_gateway_removed(self, ex_gw_port, interface_name): - super(DvrEdgeRouter, self).external_gateway_removed(ex_gw_port, - 
interface_name) + super().external_gateway_removed(ex_gw_port, + interface_name) if not self._is_this_snat_host() and not self.snat_namespace.exists(): # no centralized SNAT gateway for this node/agent LOG.debug("not hosting snat for router: %s", self.router['id']) @@ -106,7 +106,7 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): self.snat_namespace.delete() def internal_network_added(self, port): - super(DvrEdgeRouter, self).internal_network_added(port) + super().internal_network_added(port) # TODO(gsagie) some of this checks are already implemented # in the base class, think how to avoid re-doing them @@ -143,12 +143,12 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): prefix=lib_constants.SNAT_INT_DEV_PREFIX) def internal_network_updated(self, port): - super(DvrEdgeRouter, self).internal_network_updated(port) + super().internal_network_updated(port) if port: self._set_snat_interfce_mtu(port) def _dvr_internal_network_removed(self, port): - super(DvrEdgeRouter, self)._dvr_internal_network_removed(port) + super()._dvr_internal_network_removed(port) if not self.ex_gw_port: return @@ -178,7 +178,7 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): def initialize(self, process_monitor): self._create_snat_namespace() - super(DvrEdgeRouter, self).initialize(process_monitor) + super().initialize(process_monitor) def _create_dvr_gateway(self, ex_gw_port, gw_interface_name): # connect snat_ports to br_int from SNAT namespace @@ -220,7 +220,7 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): return host == self.host def _handle_router_snat_rules(self, ex_gw_port, interface_name): - super(DvrEdgeRouter, self)._handle_router_snat_rules( + super()._handle_router_snat_rules( ex_gw_port, interface_name) if not self._is_this_snat_host(): @@ -259,21 +259,21 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): if self._should_update_snat_routing_table(): ns_name = self.snat_namespace.name self._update_routing_table(operation, route, ns_name) - super(DvrEdgeRouter, self).update_routing_table(operation, route) + super().update_routing_table(operation, route) def update_routing_table_ecmp(self, route_list): if self._should_update_snat_routing_table(): ns_name = self.snat_namespace.name self._update_routing_table_ecmp(route_list, ns_name) - super(DvrEdgeRouter, self).update_routing_table_ecmp(route_list) + super().update_routing_table_ecmp(route_list) def delete(self): - super(DvrEdgeRouter, self).delete() + super().delete() if self.snat_namespace.exists(): self.snat_namespace.delete() def process_address_scope(self): - super(DvrEdgeRouter, self).process_address_scope() + super().process_address_scope() if not self._is_this_snat_host(): return @@ -327,9 +327,9 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): return set() interface_name = self.get_snat_external_device_interface_name( ex_gw_port) - return set([addr['cidr'] for addr in ip_lib.get_devices_with_ip( + return {addr['cidr'] for addr in ip_lib.get_devices_with_ip( self.snat_namespace.name, - name=interface_name)]) + name=interface_name)} def get_router_cidrs(self, device): """Over-ride the get_router_cidrs function to return the list. @@ -339,7 +339,7 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): This includes the centralized floatingip cidr list and the regular floatingip cidr list that are bound to fip namespace. 
""" - fip_cidrs = super(DvrEdgeRouter, self).get_router_cidrs(device) + fip_cidrs = super().get_router_cidrs(device) centralized_cidrs = self.get_centralized_fip_cidr_set() return fip_cidrs | centralized_cidrs @@ -382,7 +382,8 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): return lib_constants.FLOATINGIP_STATUS_ACTIVE def _centralized_floating_forward_rules(self, floating_ip, fixed_ip): - to_source = '-s %s/32 -j SNAT --to-source %s' % (fixed_ip, floating_ip) + to_source = '-s {}/32 -j SNAT --to-source {}'.format( + fixed_ip, floating_ip) if self.snat_iptables_manager.random_fully: to_source += ' --random-fully' return [('PREROUTING', '-d %s/32 -j DNAT --to-destination %s' % @@ -417,4 +418,4 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter): # Cover mixed dvr_snat and compute node, aka a dvr_snat node has both # centralized and distributed floating IPs. - super(DvrEdgeRouter, self).process_floating_ip_nat_rules() + super().process_floating_ip_nat_rules() diff --git a/neutron/agent/l3/dvr_fip_ns.py b/neutron/agent/l3/dvr_fip_ns.py index 1e0386e80c5..418bfdde472 100644 --- a/neutron/agent/l3/dvr_fip_ns.py +++ b/neutron/agent/l3/dvr_fip_ns.py @@ -51,7 +51,7 @@ class FipNamespace(namespaces.Namespace): def __init__(self, ext_net_id, agent_conf, driver, use_ipv6): name = self._get_ns_name(ext_net_id) - super(FipNamespace, self).__init__( + super().__init__( name, agent_conf, driver, use_ipv6) self._ext_net_id = ext_net_id @@ -208,7 +208,7 @@ class FipNamespace(namespaces.Namespace): def create(self): LOG.debug("DVR: add fip namespace: %s", self.name) # parent class will ensure the namespace exists and turn-on forwarding - super(FipNamespace, self).create() + super().create() ip_lib.set_ip_nonlocal_bind_for_namespace(self.name, 1, root_namespace=True) @@ -239,7 +239,7 @@ class FipNamespace(namespaces.Namespace): # TODO(mrsmith): add LOG warn if fip count != 0 LOG.debug('DVR: destroy fip namespace: %s', self.name) - super(FipNamespace, self).delete() + super().delete() def _check_for_gateway_ip_change(self, new_agent_gateway_port): diff --git a/neutron/agent/l3/dvr_local_router.py b/neutron/agent/l3/dvr_local_router.py index 4d4d401c33a..47df0587687 100644 --- a/neutron/agent/l3/dvr_local_router.py +++ b/neutron/agent/l3/dvr_local_router.py @@ -38,7 +38,7 @@ Arp_entry = collections.namedtuple( class DvrLocalRouter(dvr_router_base.DvrRouterBase): def __init__(self, host, *args, **kwargs): - super(DvrLocalRouter, self).__init__(host, *args, **kwargs) + super().__init__(host, *args, **kwargs) self.floating_ips_dict = {} # Linklocal subnet for router and floating IP namespace link @@ -132,9 +132,10 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase): floating_ip = fip['floating_ip_address'] rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id) dnat_from_floatingip_to_fixedip = ( - 'PREROUTING', '-d %s/32 -i %s -j DNAT --to-destination %s' % ( + 'PREROUTING', '-d {}/32 -i {} -j DNAT --to-destination {}'.format( floating_ip, rtr_2_fip_name, fixed_ip)) - to_source = '-s %s/32 -j SNAT --to-source %s' % (fixed_ip, floating_ip) + to_source = '-s {}/32 -j SNAT --to-source {}'.format( + fixed_ip, floating_ip) if self.iptables_manager.random_fully: to_source += ' --random-fully' snat_from_fixedip_to_floatingip = ('float-snat', to_source) @@ -147,7 +148,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase): rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id) mark_traffic_to_floating_ip = ( - 'floatingip', '-d %s/32 -i %s -j MARK --set-xmark %s' % ( + 
'floatingip', '-d {}/32 -i {} -j MARK --set-xmark {}'.format( floating_ip, rtr_2_fip_name, internal_mark)) mark_traffic_from_fixed_ip = ( 'FORWARD', '-s %s/32 -j $float-snat' % fixed_ip) @@ -505,7 +506,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase): self._snat_redirect_modify(gateway, sn_port, sn_int, is_add=False) def internal_network_added(self, port): - super(DvrLocalRouter, self).internal_network_added(port) + super().internal_network_added(port) # NOTE: The following function _set_subnet_arp_info # should be called to dynamically populate the arp @@ -576,7 +577,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase): def internal_network_removed(self, port): self._dvr_internal_network_removed(port) - super(DvrLocalRouter, self).internal_network_removed(port) + super().internal_network_removed(port) def get_floating_agent_gw_interface(self, ext_net_id): """Filter Floating Agent GW port for the external network.""" @@ -739,7 +740,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase): if ex_gw_port: self.create_dvr_external_gateway_on_agent(ex_gw_port) self.connect_rtr_2_fip() - super(DvrLocalRouter, self).process_external() + super().process_external() def _check_rtr_2_fip_connect(self): """Checks if the rtr to fip connect exists, if not sets to false.""" @@ -866,7 +867,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase): tbl_index = self._get_snat_idx(fip_2_rtr) self._update_fip_route_table_with_next_hop_routes( operation, route, fip_ns_name, tbl_index) - super(DvrLocalRouter, self).update_routing_table(operation, route) + super().update_routing_table(operation, route) def _update_fip_route_table_with_next_hop_routes(self, operation, route, fip_ns_name, tbl_index): @@ -923,4 +924,4 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase): self.fip_ns = self.agent.get_fip_ns(ex_gw_port['network_id']) self.fip_ns.scan_fip_ports(self) - super(DvrLocalRouter, self).process() + super().process() diff --git a/neutron/agent/l3/dvr_router_base.py b/neutron/agent/l3/dvr_router_base.py index 06d92ef24d2..3cbe267306d 100644 --- a/neutron/agent/l3/dvr_router_base.py +++ b/neutron/agent/l3/dvr_router_base.py @@ -20,13 +20,13 @@ LOG = logging.getLogger(__name__) class DvrRouterBase(router.RouterInfo): def __init__(self, host, *args, **kwargs): - super(DvrRouterBase, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.host = host self.snat_ports = None def process(self): - super(DvrRouterBase, self).process() + super().process() # NOTE: Keep a copy of the interfaces around for when they are removed self.snat_ports = self.get_snat_interfaces() diff --git a/neutron/agent/l3/dvr_snat_ns.py b/neutron/agent/l3/dvr_snat_ns.py index a8761bcb9a8..c5b73c72c14 100644 --- a/neutron/agent/l3/dvr_snat_ns.py +++ b/neutron/agent/l3/dvr_snat_ns.py @@ -25,11 +25,11 @@ class SnatNamespace(namespaces.Namespace): def __init__(self, router_id, agent_conf, driver, use_ipv6): self.router_id = router_id name = self.get_snat_ns_name(router_id) - super(SnatNamespace, self).__init__( + super().__init__( name, agent_conf, driver, use_ipv6) def create(self): - super(SnatNamespace, self).create() + super().create() # Set nonlocal_bind to 1 to allow setup applications in HA mode # for example ipsec from VPNaaS ip_lib.set_ip_nonlocal_bind_for_namespace(self.name, 1) @@ -58,4 +58,4 @@ class SnatNamespace(namespaces.Namespace): # TODO(mrsmith): delete ext-gw-port LOG.debug('DVR: destroy snat ns: %s', self.name) - super(SnatNamespace, self).delete() + super().delete() diff --git 
a/neutron/agent/l3/extensions/conntrack_helper.py b/neutron/agent/l3/extensions/conntrack_helper.py index e95a287e151..7f9a4010cb8 100644 --- a/neutron/agent/l3/extensions/conntrack_helper.py +++ b/neutron/agent/l3/extensions/conntrack_helper.py @@ -33,7 +33,7 @@ CONNTRACK_HELPER_PREFIX = 'cthelper-' CONNTRACK_HELPER_CHAIN_PREFIX = DEFAULT_CONNTRACK_HELPER_CHAIN + '-' -class ConntrackHelperMapping(object): +class ConntrackHelperMapping: def __init__(self): self._managed_conntrack_helpers = {} @@ -131,8 +131,8 @@ class ConntrackHelperAgentExtension(l3_extension.L3AgentExtension): :constants.MAX_IPTABLES_CHAIN_LEN_WRAP] def _install_default_rules(self, iptables_manager, version): - default_rule = '-j %s-%s' % (iptables_manager.wrap_name, - DEFAULT_CONNTRACK_HELPER_CHAIN) + default_rule = '-j {}-{}'.format(iptables_manager.wrap_name, + DEFAULT_CONNTRACK_HELPER_CHAIN) if version == constants.IPv4: iptables_manager.ipv4['raw'].add_chain( DEFAULT_CONNTRACK_HELPER_CHAIN) @@ -146,7 +146,7 @@ class ConntrackHelperAgentExtension(l3_extension.L3AgentExtension): def _get_chain_rules_list(self, conntrack_helper, wrap_name): chain_name = self._get_chain_name(conntrack_helper.id) chain_rule_list = [(DEFAULT_CONNTRACK_HELPER_CHAIN, - '-j %s-%s' % (wrap_name, chain_name))] + '-j {}-{}'.format(wrap_name, chain_name))] chain_rule_list.append((chain_name, '-p %(proto)s --dport %(dport)s -j CT ' '--helper %(helper)s' % diff --git a/neutron/agent/l3/extensions/ndp_proxy.py b/neutron/agent/l3/extensions/ndp_proxy.py index aaa7c82c04c..c8f229732c9 100644 --- a/neutron/agent/l3/extensions/ndp_proxy.py +++ b/neutron/agent/l3/extensions/ndp_proxy.py @@ -34,7 +34,7 @@ LOG = logging.getLogger(__name__) DEFAULT_NDP_PROXY_CHAIN = 'NDP' -class RouterNDPProxyMapping(object): +class RouterNDPProxyMapping: def __init__(self): self.managed_ndp_proxies = {} @@ -217,7 +217,7 @@ class NDPProxyAgentExtension(l3_extension.L3AgentExtension): cmd = ['ip', '-6', 'neigh', 'add', 'proxy', v6_address, 'dev', interface_name] ip_wrapper.netns.execute(cmd, privsep_exec=True) - accept_rule = '-i %s --destination %s -j ACCEPT' % ( + accept_rule = '-i {} --destination {} -j ACCEPT'.format( interface_name, v6_address) iptables_manager.ipv6['filter'].add_rule( DEFAULT_NDP_PROXY_CHAIN, accept_rule, top=True) @@ -251,7 +251,7 @@ class NDPProxyAgentExtension(l3_extension.L3AgentExtension): cmd = ['ip', '-6', 'neigh', 'del', 'proxy', v6_address, 'dev', interface_name] ip_wrapper.netns.execute(cmd, privsep_exec=True) - accept_rule = '-i %s --destination %s -j ACCEPT' % ( + accept_rule = '-i {} --destination {} -j ACCEPT'.format( interface_name, v6_address) iptables_manager.ipv6['filter'].remove_rule( DEFAULT_NDP_PROXY_CHAIN, accept_rule, top=True) diff --git a/neutron/agent/l3/extensions/port_forwarding.py b/neutron/agent/l3/extensions/port_forwarding.py index 2f197280e61..a56f090b3b5 100644 --- a/neutron/agent/l3/extensions/port_forwarding.py +++ b/neutron/agent/l3/extensions/port_forwarding.py @@ -36,7 +36,7 @@ PORT_FORWARDING_PREFIX = 'fip_portforwarding-' PORT_FORWARDING_CHAIN_PREFIX = 'pf-' -class RouterFipPortForwardingMapping(object): +class RouterFipPortForwardingMapping: def __init__(self): self.managed_port_forwardings = {} """ @@ -386,9 +386,9 @@ class PortForwardingAgentExtension(l3_extension.L3AgentExtension): iptables_manager.apply() - fip_id_cidrs = set([(pf.floatingip_id, - str(netaddr.IPNetwork(pf.floating_ip_address))) - for pf in port_forwardings]) + fip_id_cidrs = {(pf.floatingip_id, + 
str(netaddr.IPNetwork(pf.floating_ip_address))) + for pf in port_forwardings} self._sync_and_remove_fip(context, fip_id_cidrs, device, ri) self._store_local(port_forwardings, events.DELETED) @@ -431,8 +431,8 @@ class PortForwardingAgentExtension(l3_extension.L3AgentExtension): return chain_name[:constants.MAX_IPTABLES_CHAIN_LEN_WRAP] def _install_default_rules(self, iptables_manager): - default_rule = '-j %s-%s' % (iptables_manager.wrap_name, - DEFAULT_PORT_FORWARDING_CHAIN) + default_rule = '-j {}-{}'.format(iptables_manager.wrap_name, + DEFAULT_PORT_FORWARDING_CHAIN) iptables_manager.ipv4['nat'].add_chain(DEFAULT_PORT_FORWARDING_CHAIN) iptables_manager.ipv4['nat'].add_rule('PREROUTING', default_rule) iptables_manager.apply() diff --git a/neutron/agent/l3/extensions/qos/base.py b/neutron/agent/l3/extensions/qos/base.py index dfc0878adde..014b566a4fb 100644 --- a/neutron/agent/l3/extensions/qos/base.py +++ b/neutron/agent/l3/extensions/qos/base.py @@ -50,7 +50,7 @@ IP_DEFAULT_RATE = 0 IP_DEFAULT_BURST = 0 -class RateLimitMaps(object): +class RateLimitMaps: def __init__(self, lock_name): # qos_policy_2_resources = {qos_id_1: {res_1, res_2, res_3, ...} } @@ -138,7 +138,7 @@ class RateLimitMaps(object): del self.known_policies[qos_policy_id] -class L3QosAgentExtensionBase(object): +class L3QosAgentExtensionBase: SUPPORTED_RESOURCE_TYPES = [resources.QOS_POLICY] def consume_api(self, agent_api): diff --git a/neutron/agent/l3/extensions/qos/fip.py b/neutron/agent/l3/extensions/qos/fip.py index 6d7b8bcb5c2..d2d51f96036 100644 --- a/neutron/agent/l3/extensions/qos/fip.py +++ b/neutron/agent/l3/extensions/qos/fip.py @@ -60,7 +60,7 @@ class RouterFipRateLimitMaps(qos_base.RateLimitMaps): """ self.ingress_ratelimits = {} self.egress_ratelimits = {} - super(RouterFipRateLimitMaps, self).__init__(self.LOCK_NAME) + super().__init__(self.LOCK_NAME) def get_router_id_by_fip(self, fip_res): @@ -74,7 +74,7 @@ class RouterFipRateLimitMaps(qos_base.RateLimitMaps): @lockutils.synchronized(self.lock_name) def _get_fips_by_router_id(): - return self._router_2_fips.get(router_id, set([])) + return self._router_2_fips.get(router_id, set()) return _get_fips_by_router_id() @@ -94,7 +94,7 @@ class RouterFipRateLimitMaps(qos_base.RateLimitMaps): @lockutils.synchronized(self.lock_name) def _delete_fips(): - router_ids = set([]) + router_ids = set() for fip_res in fips: router_id = self._fips_2_router.pop(fip_res, None) if router_id: @@ -337,7 +337,7 @@ class FipQosAgentExtension(qos_base.L3QosAgentExtensionBase, router_info.get_port_forwarding_fips()) current_fips = self.fip_qos_map.get_fips_by_router_id( router_info.router_id) - new_fips = set([]) + new_fips = set() for fip in floating_ips: fip_res = FipResource(fip['id'], fip['floating_ip_address']) new_fips.add(fip_res) diff --git a/neutron/agent/l3/fip_rule_priority_allocator.py b/neutron/agent/l3/fip_rule_priority_allocator.py index 8948067b3b7..4ce540bd827 100644 --- a/neutron/agent/l3/fip_rule_priority_allocator.py +++ b/neutron/agent/l3/fip_rule_priority_allocator.py @@ -15,7 +15,7 @@ from neutron.agent.l3.item_allocator import ItemAllocator -class FipPriority(object): +class FipPriority: def __init__(self, index): self.index = index @@ -48,9 +48,9 @@ class FipRulePriorityAllocator(ItemAllocator): using ',' as the delimiter and FipRulePriorityAllocator as the class type """ - pool = set(FipPriority(str(s)) for s in range(priority_rule_start, - priority_rule_end)) + pool = {FipPriority(str(s)) for s in range(priority_rule_start, + priority_rule_end)} - 
super(FipRulePriorityAllocator, self).__init__(data_store_path, - FipPriority, - pool) + super().__init__(data_store_path, + FipPriority, + pool) diff --git a/neutron/agent/l3/ha.py b/neutron/agent/l3/ha.py index f0369193818..fb660bf2dbb 100644 --- a/neutron/agent/l3/ha.py +++ b/neutron/agent/l3/ha.py @@ -39,7 +39,7 @@ TRANSLATION_MAP = {'primary': constants.HA_ROUTER_STATE_ACTIVE, 'unknown': constants.HA_ROUTER_STATE_UNKNOWN} -class KeepalivedStateChangeHandler(object): +class KeepalivedStateChangeHandler: def __init__(self, agent): self.agent = agent @@ -56,7 +56,7 @@ class KeepalivedStateChangeHandler(object): self.agent.enqueue_state_change(router_id, state) -class L3AgentKeepalivedStateChangeServer(object): +class L3AgentKeepalivedStateChangeServer: def __init__(self, agent, conf): self.agent = agent self.conf = conf @@ -80,10 +80,10 @@ class L3AgentKeepalivedStateChangeServer(object): @registry.has_registry_receivers -class AgentMixin(object): +class AgentMixin: def __init__(self, host): self._init_ha_conf_path() - super(AgentMixin, self).__init__(host) + super().__init__(host) # BatchNotifier queue is needed to ensure that the HA router # state change sequence is under the proper order. self.state_change_notifier = batch_notifier.BatchNotifier( @@ -254,8 +254,8 @@ class AgentMixin(object): ri.disable_radvd() def notify_server(self, batched_events): - translated_states = dict((router_id, TRANSLATION_MAP[state]) for - router_id, state in batched_events) + translated_states = {router_id: TRANSLATION_MAP[state] for + router_id, state in batched_events} LOG.debug('Updating server with HA routers states %s', translated_states) self.plugin_rpc.update_ha_routers_states( diff --git a/neutron/agent/l3/ha_router.py b/neutron/agent/l3/ha_router.py index c5476c3c0b6..a780641ef14 100644 --- a/neutron/agent/l3/ha_router.py +++ b/neutron/agent/l3/ha_router.py @@ -64,7 +64,7 @@ class HaRouterNamespace(namespaces.RouterNamespace): which cause lost connectivity to Floating IPs. 
""" def create(self): - super(HaRouterNamespace, self).create(ipv6_forwarding=False) + super().create(ipv6_forwarding=False) # HA router namespaces should have ip_nonlocal_bind enabled ip_lib.set_ip_nonlocal_bind_for_namespace(self.name, 1) # Linux should not automatically assign link-local addr for HA routers @@ -76,7 +76,7 @@ class HaRouterNamespace(namespaces.RouterNamespace): class HaRouter(router.RouterInfo): def __init__(self, *args, **kwargs): - super(HaRouter, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.ha_port = None self.keepalived_manager = None @@ -119,14 +119,14 @@ class HaRouter(router.RouterInfo): if self._ha_state: return self._ha_state try: - with open(self.ha_state_path, 'r') as f: + with open(self.ha_state_path) as f: # TODO(haleyb): put old code back after a couple releases, # Y perhaps, just for backwards-compat # self._ha_state = f.read() ha_state = f.read() ha_state = 'primary' if ha_state == 'master' else ha_state self._ha_state = ha_state - except (OSError, IOError) as error: + except OSError as error: LOG.debug('Error while reading HA state for %s: %s', self.router_id, error) return self._ha_state or 'unknown' @@ -137,7 +137,7 @@ class HaRouter(router.RouterInfo): try: with open(self.ha_state_path, 'w') as f: f.write(new_state) - except (OSError, IOError) as error: + except OSError as error: LOG.error('Error while writing HA state for %s: %s', self.router_id, error) @@ -161,7 +161,7 @@ class HaRouter(router.RouterInfo): self.router_id) LOG.exception(msg) raise Exception(msg) - super(HaRouter, self).initialize(process_monitor) + super().initialize(process_monitor) self.set_ha_port() self._init_keepalived_manager(process_monitor) @@ -288,7 +288,7 @@ class HaRouter(router.RouterInfo): route['destination'], route['nexthop']) for route in new_routes] if self.router.get('distributed', False): - super(HaRouter, self).routes_updated(old_routes, new_routes) + super().routes_updated(old_routes, new_routes) self.keepalived_manager.get_process().reload_cfg() def _add_default_gw_virtual_route(self, ex_gw_port, interface_name): @@ -315,7 +315,7 @@ class HaRouter(router.RouterInfo): def _add_extra_subnet_onlink_routes(self, ex_gw_port, interface_name): extra_subnets = ex_gw_port.get('extra_subnets', []) instance = self._get_keepalived_instance() - onlink_route_cidrs = set(s['cidr'] for s in extra_subnets) + onlink_route_cidrs = {s['cidr'] for s in extra_subnets} instance.virtual_routes.extra_subnets = [ keepalived.KeepalivedVirtualRoute( onlink_route_cidr, None, interface_name, scope='link') for @@ -375,7 +375,7 @@ class HaRouter(router.RouterInfo): self._remove_vip(ip_cidr) to = common_utils.cidr_to_ip(ip_cidr) if device.addr.list(to=to): - super(HaRouter, self).remove_floating_ip(device, ip_cidr) + super().remove_floating_ip(device, ip_cidr) def internal_network_updated(self, port): interface_name = self.get_internal_device_name(port['id']) @@ -407,7 +407,7 @@ class HaRouter(router.RouterInfo): port, self.get_internal_device_name, router.INTERNAL_DEV_PREFIX) def internal_network_removed(self, port): - super(HaRouter, self).internal_network_removed(port) + super().internal_network_removed(port) interface_name = self.get_internal_device_name(port['id']) self._clear_vips(interface_name) @@ -483,8 +483,8 @@ class HaRouter(router.RouterInfo): def _get_filtered_dict(d, ignore): return {k: v for k, v in d.items() if k not in ignore} - keys_to_ignore = set([portbindings.HOST_ID, timestamp.UPDATED, - revisions.REVISION]) + keys_to_ignore = 
{portbindings.HOST_ID, timestamp.UPDATED, + revisions.REVISION} port1_filtered = _get_filtered_dict(port1, keys_to_ignore) port2_filtered = _get_filtered_dict(port2, keys_to_ignore) return port1_filtered == port2_filtered @@ -513,8 +513,8 @@ class HaRouter(router.RouterInfo): self._clear_vips(interface_name) if self.ha_state == 'primary': - super(HaRouter, self).external_gateway_removed(ex_gw_port, - interface_name) + super().external_gateway_removed(ex_gw_port, + interface_name) else: # We are not the primary node, so no need to delete ip addresses. self.driver.unplug(interface_name, @@ -526,7 +526,7 @@ class HaRouter(router.RouterInfo): self.destroy_state_change_monitor(self.process_monitor) self.disable_keepalived() self.ha_network_removed() - super(HaRouter, self).delete() + super().delete() def set_ha_port(self): ha_port = self.router.get(n_consts.HA_INTERFACE_KEY) @@ -541,7 +541,7 @@ class HaRouter(router.RouterInfo): self.ha_port = ha_port def process(self): - super(HaRouter, self).process() + super().process() self.set_ha_port() LOG.debug("Processing HA router %(router_id)s with HA port: %(port)s", @@ -555,4 +555,4 @@ class HaRouter(router.RouterInfo): def enable_radvd(self, internal_ports=None): if (self.keepalived_manager.get_process().active and self.ha_state == 'primary'): - super(HaRouter, self).enable_radvd(internal_ports) + super().enable_radvd(internal_ports) diff --git a/neutron/agent/l3/item_allocator.py b/neutron/agent/l3/item_allocator.py index 3689f541a2d..cdd84fa03cc 100644 --- a/neutron/agent/l3/item_allocator.py +++ b/neutron/agent/l3/item_allocator.py @@ -21,7 +21,7 @@ from neutron._i18n import _ LOG = logging.getLogger(__name__) -class ItemAllocator(object): +class ItemAllocator: """Manages allocation of items from a pool Some of the allocations such as link local addresses used for routing @@ -120,8 +120,9 @@ class ItemAllocator(object): self._write_allocations() def _write_allocations(self): - current = ["%s,%s\n" % (k, v) for k, v in self.allocations.items()] - remembered = ["%s,%s\n" % (k, v) for k, v in self.remembered.items()] + current = ["{},{}\n".format(k, v) for k, v in self.allocations.items()] + remembered = ["{},{}\n".format(k, v) + for k, v in self.remembered.items()] current.extend(remembered) self._write(current) diff --git a/neutron/agent/l3/keepalived_state_change.py b/neutron/agent/l3/keepalived_state_change.py index 6575926e061..de32c13dede 100644 --- a/neutron/agent/l3/keepalived_state_change.py +++ b/neutron/agent/l3/keepalived_state_change.py @@ -60,8 +60,8 @@ class MonitorDaemon(daemon.Daemon): self.event_started = threading.Event() self.queue = queue.Queue() self._initial_state = None - super(MonitorDaemon, self).__init__(pidfile, uuid=router_id, - user=user, group=group) + super().__init__(pidfile, uuid=router_id, + user=user, group=group) @property def initial_state(self): @@ -163,7 +163,7 @@ class MonitorDaemon(daemon.Daemon): def handle_sigterm(self, signum, frame): self.event_stop.set() self._thread_read_queue.join(timeout=5) - super(MonitorDaemon, self).handle_sigterm(signum, frame) + super().handle_sigterm(signum, frame) def configure(conf): diff --git a/neutron/agent/l3/l3_agent_extension_api.py b/neutron/agent/l3/l3_agent_extension_api.py index 358c9565f6d..f519e7be191 100644 --- a/neutron/agent/l3/l3_agent_extension_api.py +++ b/neutron/agent/l3/l3_agent_extension_api.py @@ -16,7 +16,7 @@ from neutron.agent.linux import ip_lib -class L3AgentExtensionAPI(object): +class L3AgentExtensionAPI: '''Implements the Agent API for the L3 
agent. Extensions can gain access to this API by overriding the consume_api diff --git a/neutron/agent/l3/l3_agent_extensions_manager.py b/neutron/agent/l3/l3_agent_extensions_manager.py index 9e5bf8452a1..db5e9299187 100644 --- a/neutron/agent/l3/l3_agent_extensions_manager.py +++ b/neutron/agent/l3/l3_agent_extensions_manager.py @@ -34,8 +34,7 @@ class L3AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager): """Manage l3 agent extensions.""" def __init__(self, conf): - super(L3AgentExtensionsManager, - self).__init__(conf, L3_AGENT_EXT_MANAGER_NAMESPACE) + super().__init__(conf, L3_AGENT_EXT_MANAGER_NAMESPACE) extensions = [] for extension in self: if not isinstance(extension.obj, (l3_extension.L3AgentExtension,)): diff --git a/neutron/agent/l3/link_local_allocator.py b/neutron/agent/l3/link_local_allocator.py index f313aa33503..5aa3c2b1ea3 100644 --- a/neutron/agent/l3/link_local_allocator.py +++ b/neutron/agent/l3/link_local_allocator.py @@ -19,14 +19,15 @@ from neutron.agent.l3.item_allocator import ItemAllocator class LinkLocalAddressPair(netaddr.IPNetwork): def __init__(self, addr): - super(LinkLocalAddressPair, self).__init__(addr) + super().__init__(addr) def get_pair(self): """Builds an address pair from the first and last addresses. """ # TODO(kevinbenton): the callers of this seem only interested in an IP, # so we should just return two IPAddresses. - return (netaddr.IPNetwork("%s/%s" % (self.network, self.prefixlen)), - netaddr.IPNetwork("%s/%s" % (self[-1], self.prefixlen))) + return ( + netaddr.IPNetwork("{}/{}".format(self.network, self.prefixlen)), + netaddr.IPNetwork("{}/{}".format(self[-1], self.prefixlen))) class LinkLocalAllocator(ItemAllocator): @@ -46,7 +47,7 @@ class LinkLocalAllocator(ItemAllocator): class type """ subnet = netaddr.IPNetwork(subnet) - pool = set(LinkLocalAddressPair(s) for s in subnet.subnet(31)) - super(LinkLocalAllocator, self).__init__(data_store_path, - LinkLocalAddressPair, - pool) + pool = {LinkLocalAddressPair(s) for s in subnet.subnet(31)} + super().__init__(data_store_path, + LinkLocalAddressPair, + pool) diff --git a/neutron/agent/l3/namespace_manager.py b/neutron/agent/l3/namespace_manager.py index 98af154928f..f42e9410960 100644 --- a/neutron/agent/l3/namespace_manager.py +++ b/neutron/agent/l3/namespace_manager.py @@ -21,7 +21,7 @@ from neutron.agent.linux import ip_lib LOG = logging.getLogger(__name__) -class NamespaceManager(object): +class NamespaceManager: """Keeps track of namespaces that need to be cleaned up. 
@@ -115,7 +115,7 @@ class NamespaceManager(object): """Get a set of all namespaces on host managed by this manager.""" try: namespaces = ip_lib.list_network_namespaces() - return set(ns for ns in namespaces if self.is_managed(ns)) + return {ns for ns in namespaces if self.is_managed(ns)} except RuntimeError: LOG.exception('RuntimeError in obtaining namespace list for ' 'namespace cleanup.') diff --git a/neutron/agent/l3/namespaces.py b/neutron/agent/l3/namespaces.py index 10d3922be6e..9aa5d765cd2 100644 --- a/neutron/agent/l3/namespaces.py +++ b/neutron/agent/l3/namespaces.py @@ -79,7 +79,7 @@ def check_ns_existence(f): return wrapped -class Namespace(object): +class Namespace: def __init__(self, name, agent_conf, driver, use_ipv6): self.name = name @@ -126,7 +126,7 @@ class RouterNamespace(Namespace): def __init__(self, router_id, agent_conf, driver, use_ipv6): self.router_id = router_id name = self._get_ns_name(router_id) - super(RouterNamespace, self).__init__( + super().__init__( name, agent_conf, driver, use_ipv6) @classmethod @@ -149,4 +149,4 @@ class RouterNamespace(Namespace): namespace=self.name, prefix=EXTERNAL_DEV_PREFIX) - super(RouterNamespace, self).delete() + super().delete() diff --git a/neutron/agent/l3/router_info.py b/neutron/agent/l3/router_info.py index d535f796469..91206a790f9 100644 --- a/neutron/agent/l3/router_info.py +++ b/neutron/agent/l3/router_info.py @@ -44,7 +44,7 @@ ADDRESS_SCOPE_MARK_ID_MAX = 2048 DEFAULT_ADDRESS_SCOPE = "noscope" -class BaseRouterInfo(object, metaclass=abc.ABCMeta): +class BaseRouterInfo(metaclass=abc.ABCMeta): def __init__(self, agent, @@ -130,8 +130,8 @@ class RouterInfo(BaseRouterInfo): agent_conf, interface_driver, use_ipv6=False): - super(RouterInfo, self).__init__(agent, router_id, router, agent_conf, - interface_driver, use_ipv6) + super().__init__(agent, router_id, router, agent_conf, + interface_driver, use_ipv6) self.ex_gw_port = None self.fip_map = {} @@ -158,7 +158,7 @@ class RouterInfo(BaseRouterInfo): self.qos_gateway_ips = set() def initialize(self, process_monitor): - super(RouterInfo, self).initialize(process_monitor) + super().initialize(process_monitor) self.radvd = ra.DaemonMonitor(self.router_id, self.ns_name, process_monitor, @@ -266,7 +266,8 @@ class RouterInfo(BaseRouterInfo): def floating_forward_rules(self, fip): fixed_ip = fip['fixed_ip_address'] floating_ip = fip['floating_ip_address'] - to_source = '-s %s/32 -j SNAT --to-source %s' % (fixed_ip, floating_ip) + to_source = '-s {}/32 -j SNAT --to-source {}'.format( + fixed_ip, floating_ip) if self.iptables_manager.random_fully: to_source += ' --random-fully' return [('PREROUTING', '-d %s/32 -j DNAT --to-destination %s' % @@ -277,7 +278,7 @@ class RouterInfo(BaseRouterInfo): def floating_mangle_rules(self, floating_ip, fixed_ip, internal_mark): mark_traffic_to_floating_ip = ( - 'floatingip', '-d %s/32 -j MARK --set-xmark %s' % ( + 'floatingip', '-d {}/32 -j MARK --set-xmark {}'.format( floating_ip, internal_mark)) mark_traffic_from_fixed_ip = ( 'FORWARD', '-s %s/32 -j $float-snat' % fixed_ip) @@ -293,7 +294,7 @@ class RouterInfo(BaseRouterInfo): mark_id = self._address_scope_to_mark_id[address_scope] # NOTE: Address scopes use only the upper 16 bits of the 32 fwmark - return "%s/%s" % (hex(mark_id << 16), ADDRESS_SCOPE_MARK_MASK) + return "{}/{}".format(hex(mark_id << 16), ADDRESS_SCOPE_MARK_MASK) def get_port_address_scope_mark(self, port): """Get the IP version 4 and 6 address scope mark for the port @@ -427,7 +428,7 @@ class RouterInfo(BaseRouterInfo): 
device.delete_addr_and_conntrack_state(ip_cidr) def get_router_cidrs(self, device): - return set([addr['cidr'] for addr in device.addr.list()]) + return {addr['cidr'] for addr in device.addr.list()} def get_centralized_fip_cidr_set(self): return set() @@ -655,18 +656,18 @@ class RouterInfo(BaseRouterInfo): namespace=self.ns_name) def address_scope_mangle_rule(self, device_name, mark_mask): - return '-i %s -j MARK --set-xmark %s' % (device_name, mark_mask) + return '-i {} -j MARK --set-xmark {}'.format(device_name, mark_mask) def address_scope_filter_rule(self, device_name, mark_mask): - return '-o %s -m mark ! --mark %s -j DROP' % ( + return '-o {} -m mark ! --mark {} -j DROP'.format( device_name, mark_mask) def _process_internal_ports(self): - existing_port_ids = set(p['id'] for p in self.internal_ports) + existing_port_ids = {p['id'] for p in self.internal_ports} internal_ports = self.router.get(lib_constants.INTERFACE_KEY, []) - current_port_ids = set(p['id'] for p in internal_ports - if p['admin_state_up']) + current_port_ids = {p['id'] for p in internal_ports + if p['admin_state_up']} new_port_ids = current_port_ids - existing_port_ids new_ports = [p for p in internal_ports if p['id'] in new_port_ids] @@ -731,10 +732,10 @@ class RouterInfo(BaseRouterInfo): self.enable_radvd(internal_ports) existing_devices = self._get_existing_devices() - current_internal_devs = set(n for n in existing_devices - if n.startswith(INTERNAL_DEV_PREFIX)) - current_port_devs = set(self.get_internal_device_name(port_id) - for port_id in current_port_ids) + current_internal_devs = {n for n in existing_devices + if n.startswith(INTERNAL_DEV_PREFIX)} + current_port_devs = {self.get_internal_device_name(port_id) + for port_id in current_port_ids} stale_devs = current_internal_devs - current_port_devs for stale_dev in stale_devs: LOG.debug('Deleting stale internal router device: %s', @@ -959,7 +960,8 @@ class RouterInfo(BaseRouterInfo): snat_internal_traffic_to_floating_ip] def external_gateway_nat_snat_rules(self, ex_gw_ip, interface_name): - to_source = '-o %s -j SNAT --to-source %s' % (interface_name, ex_gw_ip) + to_source = '-o {} -j SNAT --to-source {}'.format( + interface_name, ex_gw_ip) if self.iptables_manager.random_fully: to_source += ' --random-fully' return [('snat', to_source)] @@ -1135,7 +1137,7 @@ class RouterInfo(BaseRouterInfo): 'value': self.agent_conf.metadata_access_mark, 'mask': lib_constants.ROUTER_MARK_MASK}) drop_non_local_metadata = ( - '-m mark --mark %s/%s -j DROP' % ( + '-m mark --mark {}/{} -j DROP'.format( self.agent_conf.metadata_access_mark, lib_constants.ROUTER_MARK_MASK)) self.iptables_manager.ipv4['mangle'].add_rule( @@ -1154,7 +1156,7 @@ class RouterInfo(BaseRouterInfo): 'value': self.agent_conf.metadata_access_mark, 'mask': lib_constants.ROUTER_MARK_MASK}) drop_non_local_v6_metadata = ( - '-m mark --mark %s/%s -j DROP' % ( + '-m mark --mark {}/{} -j DROP'.format( self.agent_conf.metadata_access_mark, lib_constants.ROUTER_MARK_MASK)) self.iptables_manager.ipv6['mangle'].add_rule( @@ -1265,7 +1267,7 @@ class RouterInfo(BaseRouterInfo): return # Prevents snat within the same address scope - rule = '-o %s -m connmark --mark %s -j ACCEPT' % ( + rule = '-o {} -m connmark --mark {} -j ACCEPT'.format( external_devicename, self.get_address_scope_mark_mask(address_scope)) iptables_manager.ipv4['nat'].add_rule('snat', rule) @@ -1311,8 +1313,8 @@ class RouterInfo(BaseRouterInfo): # Update ex_gw_port on the router info cache self.ex_gw_port = self.get_ex_gw_port() - self.fip_map = 
dict((fip['floating_ip_address'], - fip['fixed_ip_address']) - for fip in self.get_floating_ips()) + self.fip_map = {fip['floating_ip_address']: + fip['fixed_ip_address'] + for fip in self.get_floating_ips()} self.fip_managed_by_port_forwardings = self.router.get( 'fip_managed_by_port_forwardings') diff --git a/neutron/agent/linux/bridge_lib.py b/neutron/agent/linux/bridge_lib.py index afc58f31c8b..ed56f871d75 100644 --- a/neutron/agent/linux/bridge_lib.py +++ b/neutron/agent/linux/bridge_lib.py @@ -60,9 +60,9 @@ def is_bridged_interface(interface): def get_interface_ifindex(interface): try: - with open(os.path.join(BRIDGE_FS, interface, 'ifindex'), 'r') as fh: + with open(os.path.join(BRIDGE_FS, interface, 'ifindex')) as fh: return int(fh.read().strip()) - except (IOError, ValueError): + except (OSError, ValueError): pass @@ -129,7 +129,7 @@ class BridgeDevice(ip_lib.IPDevice): return [] -class FdbInterface(object): +class FdbInterface: """Provide basic functionality to edit the FDB table""" @staticmethod diff --git a/neutron/agent/linux/daemon.py b/neutron/agent/linux/daemon.py index eb69d5bf922..88b7f72f70f 100644 --- a/neutron/agent/linux/daemon.py +++ b/neutron/agent/linux/daemon.py @@ -117,7 +117,7 @@ def drop_privileges(user=None, group=None): {'uid': os.getuid(), 'gid': os.getgid()}) -class Pidfile(object): +class Pidfile: def __init__(self, pidfile, procname, uuid=None): self.pidfile = pidfile self.procname = procname @@ -125,7 +125,7 @@ class Pidfile(object): try: self.fd = os.open(pidfile, os.O_CREAT | os.O_RDWR) fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB) - except IOError: + except OSError: LOG.exception("Error while handling pidfile: %s", pidfile) sys.exit(1) @@ -155,15 +155,15 @@ class Pidfile(object): cmdline = '/proc/%s/cmdline' % pid try: - with open(cmdline, "r") as f: + with open(cmdline) as f: exec_out = f.readline() return self.procname in exec_out and (not self.uuid or self.uuid in exec_out) - except IOError: + except OSError: return False -class Daemon(object): +class Daemon: """A generic daemon class. 
Usage: subclass the Daemon class and override the run() method @@ -250,7 +250,7 @@ class Daemon(object): self.run() def _set_process_title(self): - proctitle = "%s (%s)" % (self.procname, self._parent_proctitle) + proctitle = "{} ({})".format(self.procname, self._parent_proctitle) setproctitle.setproctitle(proctitle) def run(self): diff --git a/neutron/agent/linux/dhcp.py b/neutron/agent/linux/dhcp.py index 1a36c4444eb..5860a1fd5ef 100644 --- a/neutron/agent/linux/dhcp.py +++ b/neutron/agent/linux/dhcp.py @@ -105,7 +105,7 @@ class DictModel(collections.abc.MutableMapping): if isinstance(value, (list, tuple)): # Keep the same type but convert dicts to DictModels self._dictmodel_internal_storage[key] = type(value)( - (upgrade(item) for item in value) + upgrade(item) for item in value ) elif needs_upgrade(value): # Change dict instance values to DictModel instance values @@ -116,14 +116,14 @@ class DictModel(collections.abc.MutableMapping): def __getattr__(self, name): try: if name == '_dictmodel_internal_storage': - return super(DictModel, self).__getattr__(name) + return super().__getattr__(name) return self.__getitem__(name) except KeyError as e: raise AttributeError(e) def __setattr__(self, name, value): if name == '_dictmodel_internal_storage': - super(DictModel, self).__setattr__(name, value) + super().__setattr__(name, value) else: self._dictmodel_internal_storage[name] = value @@ -131,7 +131,7 @@ class DictModel(collections.abc.MutableMapping): del self._dictmodel_internal_storage[name] def __str__(self): - pairs = ['%s=%s' % (k, v) for k, v in + pairs = ['{}={}'.format(k, v) for k, v in self._dictmodel_internal_storage.items()] return ', '.join(sorted(pairs)) @@ -169,9 +169,9 @@ class DictModel(collections.abc.MutableMapping): class NetModel(DictModel): def __init__(self, *args, **kwargs): - super(NetModel, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) - self._ns_name = "%s%s" % (NS_PREFIX, self.id) + self._ns_name = "{}{}".format(NS_PREFIX, self.id) @property def namespace(self): @@ -186,7 +186,7 @@ class NetModel(DictModel): return self['tenant_id'] -class DhcpBase(object, metaclass=abc.ABCMeta): +class DhcpBase(metaclass=abc.ABCMeta): def __init__(self, conf, network, process_monitor, version=None, plugin=None, segment=None): @@ -255,8 +255,8 @@ class DhcpLocalProcess(DhcpBase, metaclass=abc.ABCMeta): def __init__(self, conf, network, process_monitor, version=None, plugin=None, segment=None): - super(DhcpLocalProcess, self).__init__(conf, network, process_monitor, - version, plugin, segment) + super().__init__(conf, network, process_monitor, + version, plugin, segment) self.confs_dir = self.get_confs_dir(conf) if self.segment: # In case of multi-segments support we want a dns process per vlan. @@ -304,7 +304,8 @@ class DhcpLocalProcess(DhcpBase, metaclass=abc.ABCMeta): # NOTE(sahid): Keep the order to match directory path. This is used # by external_process.ProcessManager to check whether the process # is active. 
- return "%s/%s" % (self.segment.segmentation_id, self.network.id) + return "{}/{}".format( + self.segment.segmentation_id, self.network.id) return self.network.id def _remove_config_files(self): @@ -399,11 +400,11 @@ class DhcpLocalProcess(DhcpBase, metaclass=abc.ABCMeta): """A helper function to read a value from one of the state files.""" file_name = self.get_conf_file_name(kind) try: - with open(file_name, 'r') as f: + with open(file_name) as f: return converter(f.read()) if converter else f.read() except ValueError: LOG.debug("Unable to convert value in %s", file_name) - except IOError: + except OSError: LOG.debug("Unable to access %s", file_name) return None @@ -791,7 +792,7 @@ class Dnsmasq(DhcpLocalProcess): ip_addresses[0].replace('.', '-').replace(':', '-')) fqdn = hostname if self.conf.dns_domain: - fqdn = '%s.%s' % (fqdn, self.conf.dns_domain) + fqdn = '{}.{}'.format(fqdn, self.conf.dns_domain) return hostname, fqdn @@ -812,9 +813,9 @@ class Dnsmasq(DhcpLocalProcess): tag, # A dhcp-host tag to add to the configuration if supported ) """ - v6_nets = dict((subnet.id, subnet) for subnet in - self._get_all_subnets(self.network) - if subnet.ip_version == 6) + v6_nets = {subnet.id: subnet for subnet in + self._get_all_subnets(self.network) + if subnet.ip_version == 6} for port in self.network.ports: if not port_requires_dhcp_configuration(port): @@ -967,7 +968,7 @@ class Dnsmasq(DhcpLocalProcess): port, alloc, hostname, name, no_dhcp, no_opts, tag = host_tuple if no_dhcp: if not no_opts and self._get_port_extra_dhcp_opts(port): - buf.write('%s,%s%s%s\n' % ( + buf.write('{},{}{}{}\n'.format( port.mac_address, tag, 'set:', self._PORT_TAG_PREFIX % port.id)) continue @@ -1033,7 +1034,7 @@ class Dnsmasq(DhcpLocalProcess): ips = self._parse_ip_addresses(host[2:]) for ip in ips: leases.add((ip, mac, client_id)) - except (OSError, IOError): + except OSError: LOG.debug('Error while reading hosts file %s', filename) return leases @@ -1198,7 +1199,8 @@ class Dnsmasq(DhcpLocalProcess): # It is compulsory to write the `fqdn` before the `hostname` in # order to obtain it in PTR responses. 
if alloc: - buf.write('%s\t%s %s\n' % (alloc.ip_address, fqdn, hostname)) + buf.write('{}\t{} {}\n'.format( + alloc.ip_address, fqdn, hostname)) addn_hosts = self.get_conf_file_name('addn_hosts') file_utils.replace_file(addn_hosts, buf.getvalue()) return addn_hosts @@ -1277,7 +1279,8 @@ class Dnsmasq(DhcpLocalProcess): if not gateway: gateway = hr.nexthop else: - host_routes.append("%s,%s" % (hr.destination, hr.nexthop)) + host_routes.append("{},{}".format( + hr.destination, hr.nexthop)) # Determine metadata port route if subnet.ip_version == constants.IP_VERSION_4: @@ -1293,7 +1296,7 @@ class Dnsmasq(DhcpLocalProcess): elif (self.conf.force_metadata or (isolated_subnets[subnet.id] and - self.conf.enable_isolated_metadata)): + self.conf.enable_isolated_metadata)): subnet_dhcp_ip = subnet_to_interface_ip.get(subnet.id) if subnet_dhcp_ip: metadata_route_ip = subnet_dhcp_ip @@ -1303,7 +1306,8 @@ class Dnsmasq(DhcpLocalProcess): if metadata_route_ip: host_routes.append( - '%s,%s' % (constants.METADATA_CIDR, metadata_route_ip) + '{},{}'.format(constants.METADATA_CIDR, + metadata_route_ip) ) for s in self._get_all_subnets(self.network): @@ -1315,8 +1319,8 @@ class Dnsmasq(DhcpLocalProcess): if host_routes: if gateway: - host_routes.append("%s,%s" % (constants.IPv4_ANY, - gateway)) + host_routes.append("{},{}".format(constants.IPv4_ANY, + gateway)) options.append( self._format_option( subnet.ip_version, @@ -1345,9 +1349,9 @@ class Dnsmasq(DhcpLocalProcess): dhcp_ips = collections.defaultdict(list) for port in self.network.ports: if self._get_port_extra_dhcp_opts(port): - port_ip_versions = set( - [netaddr.IPAddress(ip.ip_address).version - for ip in port.fixed_ips]) + port_ip_versions = { + netaddr.IPAddress(ip.ip_address).version + for ip in port.fixed_ips} for opt in port.extra_dhcp_opts: if opt.opt_name in (edo_ext.DHCP_OPT_CLIENT_ID, DHCP_OPT_CLIENT_ID_NUM, @@ -1391,10 +1395,10 @@ class Dnsmasq(DhcpLocalProcess): return options def _make_subnet_interface_ip_map(self): - subnet_lookup = dict( - (netaddr.IPNetwork(subnet.cidr), subnet.id) + subnet_lookup = { + netaddr.IPNetwork(subnet.cidr): subnet.id for subnet in self.network.subnets - ) + } retval = {} @@ -1448,7 +1452,7 @@ class Dnsmasq(DhcpLocalProcess): """ isolated_subnets = collections.defaultdict(lambda: True) all_subnets = cls._get_all_subnets(network) - subnets = dict((subnet.id, subnet) for subnet in all_subnets) + subnets = {subnet.id: subnet for subnet in all_subnets} for port in network.ports: if port.device_owner not in constants.ROUTER_INTERFACE_OWNERS: @@ -1511,7 +1515,7 @@ class Dnsmasq(DhcpLocalProcess): return any(isolated_subnets[s.id] for s in dhcp_subnets) -class DeviceManager(object): +class DeviceManager: def __init__(self, conf, plugin): self.conf = conf @@ -1575,7 +1579,7 @@ class DeviceManager(object): subnet.cidr, gateway)) if is_old_gateway_not_in_subnet: onlink = device.route.list_onlink_routes(ip_version) - existing_onlink_routes = set(r['cidr'] for r in onlink) + existing_onlink_routes = {r['cidr'] for r in onlink} if gateway in existing_onlink_routes: device.route.delete_route(gateway, scope='link') @@ -1635,7 +1639,7 @@ class DeviceManager(object): # Compare what the subnets should be against what is already # on the port. dhcp_enabled_subnet_ids = set(dhcp_subnets) - port_subnet_ids = set(ip.subnet_id for ip in port.fixed_ips) + port_subnet_ids = {ip.subnet_id for ip in port.fixed_ips} # If those differ, we need to call update. 
if dhcp_enabled_subnet_ids != port_subnet_ids: @@ -1865,7 +1869,7 @@ class DeviceManager(object): for fixed_ip in port.fixed_ips: subnet = fixed_ip.subnet net = netaddr.IPNetwork(subnet.cidr) - ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen) + ip_cidr = '{}/{}'.format(fixed_ip.ip_address, net.prefixlen) ip_cidrs.append(ip_cidr) if self.driver.use_gateway_ips: @@ -1877,7 +1881,7 @@ class DeviceManager(object): gateway = subnet.gateway_ip if gateway: net = netaddr.IPNetwork(subnet.cidr) - ip_cidrs.append('%s/%s' % (gateway, net.prefixlen)) + ip_cidrs.append('{}/{}'.format(gateway, net.prefixlen)) if self.conf.force_metadata or self.conf.enable_isolated_metadata: ip_cidrs.append(constants.METADATA_CIDR) diff --git a/neutron/agent/linux/dibbler.py b/neutron/agent/linux/dibbler.py index b3aec946895..abfa067c9dd 100644 --- a/neutron/agent/linux/dibbler.py +++ b/neutron/agent/linux/dibbler.py @@ -70,12 +70,12 @@ exec neutron-pd-notify $1 {{ prefix_path }} {{ l3_agent_pid }} class PDDibbler(pd_driver.PDDriverBase): def __init__(self, router_id, subnet_id, ri_ifname): - super(PDDibbler, self).__init__(router_id, subnet_id, ri_ifname) - self.requestor_id = "%s:%s:%s" % (self.router_id, - self.subnet_id, - self.ri_ifname) - self.dibbler_client_working_area = "%s/%s" % (cfg.CONF.pd_confs, - self.requestor_id) + super().__init__(router_id, subnet_id, ri_ifname) + self.requestor_id = "{}:{}:{}".format(self.router_id, + self.subnet_id, + self.ri_ifname) + self.dibbler_client_working_area = "{}/{}".format(cfg.CONF.pd_confs, + self.requestor_id) self.prefix_path = "%s/prefix" % self.dibbler_client_working_area self.pid_path = "%s/client.pid" % self.dibbler_client_working_area self.converted_subnet_id = self.subnet_id.replace('-', '') diff --git a/neutron/agent/linux/external_process.py b/neutron/agent/linux/external_process.py index f58f186d049..47f50cd543b 100644 --- a/neutron/agent/linux/external_process.py +++ b/neutron/agent/linux/external_process.py @@ -38,7 +38,7 @@ agent_cfg.register_external_process_opts() agent_cfg.register_process_monitor_opts(cfg.CONF) -class MonitoredProcess(object, metaclass=abc.ABCMeta): +class MonitoredProcess(metaclass=abc.ABCMeta): @property @abc.abstractmethod def active(self): @@ -76,7 +76,7 @@ class ProcessManager(MonitoredProcess): self.service_pid_fname = 'pid' self.service = DEFAULT_SERVICE_NAME - process_tag = '%s-%s' % (self.service, self.uuid) + process_tag = '{}-{}'.format(self.service, self.uuid) self.cmd_addl_env = cmd_addl_env or {} self.cmd_addl_env[PROCESS_TAG] = process_tag @@ -186,7 +186,7 @@ class ProcessManager(MonitoredProcess): ServiceId = collections.namedtuple('ServiceId', ['uuid', 'service']) -class ProcessMonitor(object): +class ProcessMonitor: def __init__(self, config, resource_type): """Handle multiple process managers and watch over all of them. 
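Aside: several hunks above collapse except (OSError, IOError) or except IOError into except OSError and drop the explicit 'r' mode from open(). Both rest on Python 3 behaviour that this standalone snippet (not Neutron code) checks directly:

```python
# On Python 3, IOError is merely an alias of OSError, and 'r' is open()'s
# default mode, so open(path) behaves exactly like open(path, 'r').
import tempfile

assert IOError is OSError  # catching both exception names was redundant


def read_or_none(path):
    try:
        with open(path) as f:  # mode defaults to 'r'
            return f.read()
    except OSError:            # also covers the old IOError cases
        return None


with tempfile.NamedTemporaryFile(mode='w', suffix='.pid', delete=False) as tmp:
    tmp.write('42\n')

print(read_or_none(tmp.name))         # -> '42\n'
print(read_or_none('/no/such/path'))  # -> None
```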
diff --git a/neutron/agent/linux/interface.py b/neutron/agent/linux/interface.py index 2915056452d..79b39ac5205 100644 --- a/neutron/agent/linux/interface.py +++ b/neutron/agent/linux/interface.py @@ -188,13 +188,13 @@ class LinuxInterfaceDriver(interface.LinuxInterfaceDriver, on-link route list """ device = ip_lib.IPDevice(device_name, namespace=namespace) - new_onlink_cidrs = set(s['cidr'] for s in extra_subnets or []) + new_onlink_cidrs = {s['cidr'] for s in extra_subnets or []} preserve_ips = set(preserve_ips if preserve_ips else []) onlink = device.route.list_onlink_routes(constants.IP_VERSION_4) if is_ipv6: onlink += device.route.list_onlink_routes(constants.IP_VERSION_6) - existing_onlink_cidrs = set(r['cidr'] for r in onlink) + existing_onlink_cidrs = {r['cidr'] for r in onlink} for route in new_onlink_cidrs - existing_onlink_cidrs: LOG.debug('Adding onlink route (%s)', route) @@ -245,8 +245,8 @@ class LinuxInterfaceDriver(interface.LinuxInterfaceDriver, """Configure handling of IPv6 Router Advertisements on an interface. See common/constants.py for possible values. """ - cmd = ['net.ipv6.conf.%(dev)s.accept_ra=%(value)s' % {'dev': dev_name, - 'value': value}] + cmd = ['net.ipv6.conf.{dev}.accept_ra={value}'.format(dev=dev_name, + value=value)] ip_lib.sysctl(cmd, namespace=namespace) @staticmethod @@ -314,7 +314,7 @@ class OVSInterfaceDriver(LinuxInterfaceDriver): DEV_NAME_PREFIX = constants.TAP_DEVICE_PREFIX def __init__(self, conf, **kwargs): - super(OVSInterfaceDriver, self).__init__(conf, **kwargs) + super().__init__(conf, **kwargs) ovs_conf.register_ovs_agent_opts(self.conf) if self.conf.ovs_use_veth: self.DEV_NAME_PREFIX = 'ns-' diff --git a/neutron/agent/linux/ip_conntrack.py b/neutron/agent/linux/ip_conntrack.py index 04dd42a976f..921329ce737 100644 --- a/neutron/agent/linux/ip_conntrack.py +++ b/neutron/agent/linux/ip_conntrack.py @@ -28,7 +28,7 @@ MAX_CONNTRACK_ZONES = 65535 ZONE_START = 4097 -class IpConntrackUpdate(object): +class IpConntrackUpdate: """Encapsulates a conntrack update An instance of this object carries the information necessary to @@ -58,7 +58,7 @@ def get_conntrack(get_rules_for_table_func, filtered_ports, unfiltered_ports, return CONTRACK_MGRS[namespace] -class IpConntrackManager(object): +class IpConntrackManager: """Smart wrapper for ip conntrack.""" def __init__(self, get_rules_for_table_func, filtered_ports, @@ -257,7 +257,7 @@ class IpConntrackManager(object): class OvsIpConntrackManager(IpConntrackManager): def __init__(self, execute=None): - super(OvsIpConntrackManager, self).__init__( + super().__init__( get_rules_for_table_func=None, filtered_ports={}, unfiltered_ports={}, execute=execute, namespace=None, zone_per_port=False) diff --git a/neutron/agent/linux/ip_lib.py b/neutron/agent/linux/ip_lib.py index 8ecf2a28116..6d9c14765fc 100644 --- a/neutron/agent/linux/ip_lib.py +++ b/neutron/agent/linux/ip_lib.py @@ -110,7 +110,7 @@ class DADFailed(AddressNotReady): InvalidArgument = privileged.InvalidArgument -class SubProcessBase(object): +class SubProcessBase: def __init__(self, namespace=None, log_fail_as_error=True): self.namespace = namespace @@ -155,7 +155,7 @@ class SubProcessBase(object): class IPWrapper(SubProcessBase): def __init__(self, namespace=None): - super(IPWrapper, self).__init__(namespace=namespace) + super().__init__(namespace=namespace) self.netns = IpNetnsCommand(self) def device(self, name): @@ -344,7 +344,7 @@ class IPWrapper(SubProcessBase): class IPDevice(SubProcessBase): def __init__(self, name, namespace=None, 
kind='link'): - super(IPDevice, self).__init__(namespace=namespace) + super().__init__(namespace=namespace) self._name = name self.kind = kind self.link = IpLinkCommand(self) @@ -360,8 +360,8 @@ return self.name def __repr__(self): - return "<IPDevice(name=%s, namespace=%s)>" % (self._name, - self.namespace) + return "<IPDevice(name={}, namespace={})>".format(self._name, + self.namespace) def exists(self): """Return True if the device exists in the namespace.""" @@ -441,7 +441,7 @@ class IPDevice(SubProcessBase): self._name = name -class IpDeviceCommandBase(object): +class IpDeviceCommandBase: def __init__(self, parent): self._parent = parent @@ -639,7 +639,7 @@ class IpAddrCommand(IpDeviceCommandBase): class IpRouteCommand(IpDeviceCommandBase): def __init__(self, parent, table=None): - super(IpRouteCommand, self).__init__(parent) + super().__init__(parent) self._table = table def add_gateway(self, gateway, metric=None, table=None, scope='global'): @@ -693,7 +693,7 @@ class IpRouteCommand(IpDeviceCommandBase): class IPRoute(SubProcessBase): def __init__(self, namespace=None, table=None): - super(IPRoute, self).__init__(namespace=namespace) + super().__init__(namespace=namespace) self.name = None self.route = IpRouteCommand(self, table=table) @@ -743,7 +743,7 @@ class IpNeighCommand(IpDeviceCommandBase): self.delete(entry['dst'], entry['lladdr']) -class IpNetnsCommand(object): +class IpNetnsCommand: def __init__(self, parent): self._parent = parent @@ -1286,7 +1286,7 @@ def _parse_ip_rule(rule, ip_version): fwmark = rule['attrs'].get('FRA_FWMARK') if fwmark: fwmask = rule['attrs'].get('FRA_FWMASK') - parsed_rule['fwmark'] = '{0:#x}/{1:#x}'.format(fwmark, fwmask) + parsed_rule['fwmark'] = f'{fwmark:#x}/{fwmask:#x}' iifname = rule['attrs'].get('FRA_IIFNAME') if iifname: parsed_rule['iif'] = iifname @@ -1615,7 +1615,7 @@ def list_ip_routes(namespace, ip_version, scope=None, via=None, table=None, for route in routes: cidr = linux_utils.get_attr(route, 'RTA_DST') if cidr: - cidr = '%s/%s' % (cidr, route['dst_len']) + cidr = '{}/{}'.format(cidr, route['dst_len']) else: cidr = constants.IP_ANY[ip_version] table = int(linux_utils.get_attr(route, 'RTA_TABLE')) diff --git a/neutron/agent/linux/ipset_manager.py b/neutron/agent/linux/ipset_manager.py index a2c9c91cb3f..733504d3e5f 100644 --- a/neutron/agent/linux/ipset_manager.py +++ b/neutron/agent/linux/ipset_manager.py @@ -24,7 +24,7 @@ SWAP_SUFFIX = '-n' IPSET_NAME_MAX_LENGTH = 31 - len(SWAP_SUFFIX) -class IpsetManager(object): +class IpsetManager: """Smart wrapper for ipset. 
Keeps track of ip addresses per set, using bulk @@ -120,10 +120,10 @@ class IpsetManager(object): def _refresh_set(self, set_name, member_ips, ethertype): new_set_name = set_name + SWAP_SUFFIX set_type = self._get_ipset_set_type(ethertype) - process_input = ["create %s hash:net family %s" % (new_set_name, - set_type)] + process_input = ["create {} hash:net family {}".format(new_set_name, + set_type)] for ip in member_ips: - process_input.append("add %s %s" % (new_set_name, ip)) + process_input.append("add {} {}".format(new_set_name, ip)) self._restore_sets(process_input) self._swap_sets(new_set_name, set_name) diff --git a/neutron/agent/linux/iptables_firewall.py b/neutron/agent/linux/iptables_firewall.py index c81fae9f3db..2fd329df171 100644 --- a/neutron/agent/linux/iptables_firewall.py +++ b/neutron/agent/linux/iptables_firewall.py @@ -429,8 +429,8 @@ class IptablesFirewallDriver(firewall.FirewallDriver): for dev, match in ((br_dev, match_physdev), (br_dev, match_interface), (port_dev, match_physdev)): match = match % dev - rule = '%s -m comment --comment "%s" -j CT %s' % (match, comment, - conntrack) + rule = '{} -m comment --comment "{}" -j CT {}'.format( + match, comment, conntrack) rules.append(rule) return rules @@ -853,7 +853,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver): args += ['--%s' % direction, '%s' % port_range_min] else: args += ['-m', 'multiport', '--%ss' % direction, - '%s:%s' % (port_range_min, port_range_max)] + '{}:{}'.format(port_range_min, port_range_max)] return args def _ip_prefix_arg(self, direction, ip_prefix): @@ -872,7 +872,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver): def _port_chain_name(self, port, direction): return iptables_manager.get_chain_name( - '%s%s' % (CHAIN_NAME_PREFIX[direction], port['device'][3:])) + '{}{}'.format(CHAIN_NAME_PREFIX[direction], port['device'][3:])) def filter_defer_apply_on(self): if not self._defer_apply: @@ -1032,7 +1032,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver): def _get_sg_members(self, sg_info, sg_id, ethertype): ip_mac_addresses = sg_info.get(sg_id, {}).get(ethertype, []) - return set([ip_mac[0] for ip_mac in ip_mac_addresses]) + return {ip_mac[0] for ip_mac in ip_mac_addresses} def filter_defer_apply_off(self): if self._defer_apply: @@ -1054,12 +1054,11 @@ class OVSHybridIptablesFirewallDriver(IptablesFirewallDriver): def _port_chain_name(self, port, direction): return iptables_manager.get_chain_name( - '%s%s' % (CHAIN_NAME_PREFIX[direction], port['device'])) + '{}{}'.format(CHAIN_NAME_PREFIX[direction], port['device'])) def _get_br_device_name(self, port): return ('qvb' + port['device'])[:constants.LINUX_DEV_LEN] def _get_device_name(self, port): - device_name = super( - OVSHybridIptablesFirewallDriver, self)._get_device_name(port) + device_name = super()._get_device_name(port) return get_hybrid_port_name(device_name) diff --git a/neutron/agent/linux/iptables_manager.py b/neutron/agent/linux/iptables_manager.py index e7537575f75..59059a58ac9 100644 --- a/neutron/agent/linux/iptables_manager.py +++ b/neutron/agent/linux/iptables_manager.py @@ -77,12 +77,12 @@ def comment_rule(rule, comment): comment = '-m comment --comment "%s"' % comment if rule.startswith('-j'): # this is a jump only rule so we just put the comment first - return '%s %s' % (comment, rule) + return '{} {}'.format(comment, rule) try: jpos = rule.index(' -j ') return ' '.join((rule[:jpos], comment, rule[jpos + 1:])) except ValueError: - return '%s %s' % (rule, comment) + return '{} {}'.format(rule, comment) def 
get_chain_name(chain_name, wrap=True): @@ -92,7 +92,7 @@ def get_chain_name(chain_name, wrap=True): return chain_name[:constants.MAX_IPTABLES_CHAIN_LEN_NOWRAP] -class IptablesRule(object): +class IptablesRule: """An iptables rule. You shouldn't need to use this class directly, it's only used by @@ -121,16 +121,16 @@ class IptablesRule(object): def __str__(self): if self.wrap: - chain = '%s-%s' % (self.wrap_name, self.chain) + chain = '{}-{}'.format(self.wrap_name, self.chain) else: chain = self.chain - rule = '-A %s %s' % (chain, self.rule) + rule = '-A {} {}'.format(chain, self.rule) # If self.rule is '' the above will cause a trailing space, which # could cause us to not match on save/restore, so strip it now. return comment_rule(rule.strip(), self.comment) -class IptablesTable(object): +class IptablesTable: """An iptables table.""" def __init__(self, binary_name=binary_name): @@ -195,7 +195,7 @@ class IptablesTable(object): self.remove_rules += [str(r) for r in self.rules if r.chain == name or jump_snippet in r.rule] else: - jump_snippet = '-j %s-%s' % (self.wrap_name, name) + jump_snippet = '-j {}-{}'.format(self.wrap_name, name) # Remove rules from list that have a matching chain name or # a matching jump chain @@ -227,7 +227,7 @@ class IptablesTable(object): def _wrap_target_chain(self, s, wrap): if s.startswith('$'): - s = ('%s-%s' % (self.wrap_name, get_chain_name(s[1:], wrap))) + s = ('{}-{}'.format(self.wrap_name, get_chain_name(s[1:], wrap))) return s @@ -277,7 +277,7 @@ class IptablesTable(object): self.rules.remove(rule) -class IptablesManager(object): +class IptablesManager: """Wrapper for iptables. See IptablesTable for some usage docs @@ -580,7 +580,7 @@ class IptablesManager(object): s += [('ip6tables', self.ipv6)] all_commands = [] # variable to keep track all commands for return val for cmd, tables in s: - args = ['%s-save' % (cmd,)] + args = ['{}-save'.format(cmd)] if self.namespace: args = ['ip', 'netns', 'exec', self.namespace] + args try: @@ -624,7 +624,7 @@ class IptablesManager(object): # always end with a new line commands.append('') - args = ['%s-restore' % (cmd,), '-n'] + args = ['{}-restore'.format(cmd), '-n'] if self.namespace: args = ['ip', 'netns', 'exec', self.namespace] + args @@ -684,7 +684,7 @@ class IptablesManager(object): line.strip() not in rules] # generate our list of chain names - our_chains = [':%s-%s' % (self.wrap_name, name) for name in chains] + our_chains = [':{}-{}'.format(self.wrap_name, name) for name in chains] # the unwrapped chains (e.g. 
neutron-filter-top) may already exist in # the new_filter since they aren't marked by the wrap_name so we only diff --git a/neutron/agent/linux/keepalived.py b/neutron/agent/linux/keepalived.py index c29074605ef..95119491128 100644 --- a/neutron/agent/linux/keepalived.py +++ b/neutron/agent/linux/keepalived.py @@ -65,7 +65,7 @@ def get_free_range(parent_range, excluded_ranges, size=PRIMARY_VIP_RANGE_SIZE): free_cidrs = netaddr.IPSet([parent_range]) - netaddr.IPSet(excluded_ranges) for cidr in free_cidrs.iter_cidrs(): if cidr.prefixlen <= size: - return '%s/%s' % (cidr.network, size) + return '{}/{}'.format(cidr.network, size) raise ValueError(_('Network of size %(size)s, from IP range ' '%(parent_range)s excluding IP ranges ' @@ -82,7 +82,7 @@ class InvalidInstanceStateException(exceptions.NeutronException): def __init__(self, **kwargs): if 'valid_states' not in kwargs: kwargs['valid_states'] = ', '.join(VALID_STATES) - super(InvalidInstanceStateException, self).__init__(**kwargs) + super().__init__(**kwargs) class InvalidAuthenticationTypeException(exceptions.NeutronException): @@ -92,10 +92,10 @@ class InvalidAuthenticationTypeException(exceptions.NeutronException): def __init__(self, **kwargs): if 'valid_auth_types' not in kwargs: kwargs['valid_auth_types'] = ', '.join(VALID_AUTH_TYPES) - super(InvalidAuthenticationTypeException, self).__init__(**kwargs) + super().__init__(**kwargs) -class KeepalivedVipAddress(object): +class KeepalivedVipAddress: """A virtual address entry of a keepalived configuration.""" def __init__(self, ip_address, interface_name, scope=None, track=True): @@ -109,13 +109,13 @@ class KeepalivedVipAddress(object): self.ip_address == other.ip_address) def __str__(self): - return '[%s, %s, %s, %s]' % (self.ip_address, - self.interface_name, - self.scope, - self.track) + return '[{}, {}, {}, {}]'.format(self.ip_address, + self.interface_name, + self.scope, + self.track) def build_config(self): - result = '%s dev %s' % (self.ip_address, self.interface_name) + result = '{} dev {}'.format(self.ip_address, self.interface_name) if self.scope: result += ' scope %s' % self.scope if not self.track and _is_keepalived_use_no_track_supported(): @@ -123,7 +123,7 @@ class KeepalivedVipAddress(object): return result -class KeepalivedVirtualRoute(object): +class KeepalivedVirtualRoute: """A virtual route entry of a keepalived configuration.""" def __init__(self, destination, nexthop, interface_name=None, @@ -152,7 +152,7 @@ class KeepalivedVirtualRoute(object): return output -class KeepalivedInstanceRoutes(object): +class KeepalivedInstanceRoutes: def __init__(self): self.gateway_routes = [] self.extra_routes = [] @@ -181,7 +181,7 @@ class KeepalivedInstanceRoutes(object): [' }']) -class KeepalivedInstance(object): +class KeepalivedInstance: """Instance section of a keepalived configuration.""" def __init__(self, state, interface, vrouter_id, ha_cidrs, @@ -264,7 +264,8 @@ class KeepalivedInstance(object): ip = (netaddr.IPNetwork(self.primary_vip_range).network + self.vrouter_id) - return str(netaddr.IPNetwork('%s/%s' % (ip, PRIMARY_VIP_RANGE_SIZE))) + return str(netaddr.IPNetwork('{}/{}'.format( + ip, PRIMARY_VIP_RANGE_SIZE))) def _build_vips_config(self): # NOTE(amuller): The primary VIP must be consistent in order to avoid @@ -349,7 +350,7 @@ class KeepalivedInstance(object): return config -class KeepalivedConf(object): +class KeepalivedConf: """A keepalived configuration.""" def __init__(self): @@ -384,7 +385,7 @@ class KeepalivedConf(object): return '\n'.join(self.build_config()) 
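Aside: the iptables_manager and ip_lib hunks above swap %-interpolation for str.format() or f-strings, including templates that use printf-style conversions. The three spellings produce identical strings, and conversions such as hex map onto format specs like :#x; a standalone check with illustrative values (not Neutron code):

```python
# %-interpolation, str.format() and f-strings yield the same text, and
# printf conversions like %#x correspond to the ':#x' format spec.
wrap_name, chain, rule = 'neutron-l3-agent', 'snat', '-j SNAT'
fwmark, fwmask = 0x4000, 0xffff

assert '-A %s-%s %s' % (wrap_name, chain, rule) == \
       '-A {}-{} {}'.format(wrap_name, chain, rule) == \
       f'-A {wrap_name}-{chain} {rule}'

assert '%#x/%#x' % (fwmark, fwmask) == \
       '{0:#x}/{1:#x}'.format(fwmark, fwmask) == \
       f'{fwmark:#x}/{fwmask:#x}' == '0x4000/0xffff'

print('all formatting styles agree')
```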
-class KeepalivedManager(object): +class KeepalivedManager: """Wrapper for keepalived. This wrapper permits to write keepalived config files, to start/restart @@ -436,7 +437,7 @@ class KeepalivedManager(object): try: with open(config_path) as conf: return conf.read() - except (OSError, IOError) as e: + except OSError as e: if e.errno != errno.ENOENT: raise @@ -535,7 +536,7 @@ class KeepalivedTrackScript(KeepalivedConf): def build_config_preamble(self): config = ['', - 'vrrp_script %s_%s {' % (HEALTH_CHECK_NAME, self.vr_id), + 'vrrp_script {}_{} {{'.format(HEALTH_CHECK_NAME, self.vr_id), ' script "%s"' % self._get_script_location(), ' interval %s' % self.interval, ' fall 2', @@ -557,7 +558,7 @@ class KeepalivedTrackScript(KeepalivedConf): return '' config = [' track_script {', - ' %s_%s' % (HEALTH_CHECK_NAME, self.vr_id), + ' {}_{}'.format(HEALTH_CHECK_NAME, self.vr_id), ' }'] return config @@ -575,7 +576,7 @@ class KeepalivedTrackScript(KeepalivedConf): 6: 'ping6', }.get(netaddr.IPAddress(ip_addr).version) - return '%s -c 1 -w 1 %s 1>/dev/null || exit 1' % (cmd, ip_addr) + return '{} -c 1 -w 1 {} 1>/dev/null || exit 1'.format(cmd, ip_addr) def _check_ip_assigned(self): cmd = 'ip a | grep %s || exit 0' diff --git a/neutron/agent/linux/l3_tc_lib.py b/neutron/agent/linux/l3_tc_lib.py index aa9ec701148..458d553801b 100644 --- a/neutron/agent/linux/l3_tc_lib.py +++ b/neutron/agent/linux/l3_tc_lib.py @@ -103,8 +103,8 @@ class FloatingIPTcCommandBase(ip_lib.IPDevice): return filterids def _add_filter(self, qdisc_id, direction, ip, rate, burst): - rate_value = "%s%s" % (rate, tc_lib.BW_LIMIT_UNIT) - burst_value = "%s%s" % ( + rate_value = "{}{}".format(rate, tc_lib.BW_LIMIT_UNIT) + burst_value = "{}{}".format( tc_lib.TcCommand.get_ingress_qdisc_burst_value(rate, burst), tc_lib.BURST_UNIT ) diff --git a/neutron/agent/linux/of_monitor.py b/neutron/agent/linux/of_monitor.py index a5148a64d22..63183ac505c 100644 --- a/neutron/agent/linux/of_monitor.py +++ b/neutron/agent/linux/of_monitor.py @@ -20,7 +20,7 @@ import eventlet from neutron.agent.common import async_process -class OFEvent(object): +class OFEvent: def __init__(self, event_type, flow): self.event_type = event_type @@ -46,9 +46,9 @@ class OFMonitor(async_process.AsyncProcess): def __init__(self, bridge_name, namespace=None, respawn_interval=None, start=True): cmd = ['ovs-ofctl', 'monitor', bridge_name, 'watch:', '--monitor'] - super(OFMonitor, self).__init__(cmd, run_as_root=True, - respawn_interval=respawn_interval, - namespace=namespace) + super().__init__(cmd, run_as_root=True, + respawn_interval=respawn_interval, + namespace=namespace) if start: self.start() @@ -77,8 +77,8 @@ class OFMonitor(async_process.AsyncProcess): def start(self, **kwargs): if not self._is_running: - super(OFMonitor, self).start(block=True) + super().start(block=True) def stop(self, **kwargs): if self._is_running: - super(OFMonitor, self).stop(block=True) + super().stop(block=True) diff --git a/neutron/agent/linux/openvswitch_firewall/firewall.py b/neutron/agent/linux/openvswitch_firewall/firewall.py index 496f918503e..99f41b50d99 100644 --- a/neutron/agent/linux/openvswitch_firewall/firewall.py +++ b/neutron/agent/linux/openvswitch_firewall/firewall.py @@ -60,7 +60,7 @@ def _replace_register(flow_params, register_number, register_value): try: reg_port = flow_params[register_value] del flow_params[register_value] - flow_params['reg{:d}'.format(register_number)] = reg_port + flow_params[f'reg{register_number:d}'] = reg_port except KeyError: pass @@ -126,7 +126,7 @@ 
def get_tag_from_other_config(bridge, port_name): port_name=port_name, other_config=other_config) -class SecurityGroup(object): +class SecurityGroup: def __init__(self, id_): self.id = id_ self.raw_rules = [] @@ -161,7 +161,7 @@ class SecurityGroup(object): return self.members.get(ethertype, []) -class OFPort(object): +class OFPort: def __init__(self, port_dict, ovs_port, vlan_tag, segment_id=None, network_type=None, physical_network=None): self.id = port_dict['device'] @@ -213,7 +213,7 @@ class OFPort(object): self.neutron_port_dict = port_dict.copy() -class SGPortMap(object): +class SGPortMap: def __init__(self): self.ports = {} self.sec_groups = {} @@ -262,7 +262,7 @@ class SGPortMap(object): sec_group.members = members -class ConjIdMap(object): +class ConjIdMap: """Handle conjunction ID allocations and deallocations.""" CONJ_ID_BLOCK_SIZE = 8 @@ -270,7 +270,7 @@ class ConjIdMap(object): def __new__(cls, int_br): if not hasattr(cls, '_instance'): - cls._instance = super(ConjIdMap, cls).__new__(cls) + cls._instance = super().__new__(cls) return cls._instance def __init__(self, int_br): @@ -348,7 +348,7 @@ class ConjIdMap(object): return a list of (remote_sg_id, conj_id), which are no longer in use. """ - result = set([]) + result = set() for k in list(self.id_map.keys()): if sg_id in k[0:2]: conj_id = self.id_map.pop(k) @@ -367,7 +367,7 @@ class ConjIdMap(object): return result -class ConjIPFlowManager(object): +class ConjIPFlowManager: """Manage conj_id allocation and remote securitygroups derived conjunction flows. @@ -614,7 +614,7 @@ class OVSFirewallDriver(firewall.FirewallDriver): dl_type = kwargs.get('dl_type') create_reg_numbers(kwargs) if isinstance(dl_type, int): - kwargs['dl_type'] = "0x{:04x}".format(dl_type) + kwargs['dl_type'] = f"0x{dl_type:04x}" if self._update_cookie: kwargs['cookie'] = self._update_cookie if self._deferred: @@ -1279,7 +1279,7 @@ class OVSFirewallDriver(firewall.FirewallDriver): priority=12, dl_dst=mac, reg_net=vlan_tag, - actions='output:{:d}'.format(dst_port) + actions=f'output:{dst_port:d}' ) # For packets from patch ports. 
self._add_flow( @@ -1288,7 +1288,7 @@ class OVSFirewallDriver(firewall.FirewallDriver): priority=12, dl_dst=mac, dl_vlan=vlan_tag, - actions='strip_vlan,output:{:d}'.format(dst_port) + actions=f'strip_vlan,output:{dst_port:d}' ) # The former flow may not match, that means the destination port is @@ -1410,7 +1410,7 @@ class OVSFirewallDriver(firewall.FirewallDriver): dl_type=lib_const.ETHERTYPE_IPV6, nw_proto=lib_const.PROTO_NUM_IPV6_ICMP, icmp_type=icmp_type, - actions='output:{:d}'.format(port.ofport) + actions=f'output:{port.ofport:d}' ) def _initialize_ingress(self, port): @@ -1420,7 +1420,7 @@ class OVSFirewallDriver(firewall.FirewallDriver): priority=100, dl_type=lib_const.ETHERTYPE_ARP, reg_port=port.ofport, - actions='output:{:d}'.format(port.ofport) + actions=f'output:{port.ofport:d}' ) # Allow custom ethertypes @@ -1430,7 +1430,7 @@ class OVSFirewallDriver(firewall.FirewallDriver): priority=100, dl_type=permitted_ethertype, reg_port=port.ofport, - actions='output:{:d}'.format(port.ofport)) + actions=f'output:{port.ofport:d}') self._initialize_ingress_ipv6_icmp(port) @@ -1446,7 +1446,7 @@ class OVSFirewallDriver(firewall.FirewallDriver): nw_proto=lib_const.PROTO_NUM_UDP, tp_src=src_port, tp_dst=dst_port, - actions='output:{:d}'.format(port.ofport) + actions=f'output:{port.ofport:d}' ) # Track untracked @@ -1466,7 +1466,7 @@ class OVSFirewallDriver(firewall.FirewallDriver): ct_state=ovsfw_consts.OF_STATE_TRACKED, priority=80, reg_port=port.ofport, - actions='resubmit(,{:d})'.format(ovs_consts.RULES_INGRESS_TABLE) + actions=f'resubmit(,{ovs_consts.RULES_INGRESS_TABLE:d})' ) def _initialize_tracked_ingress(self, port): @@ -1495,10 +1495,10 @@ class OVSFirewallDriver(firewall.FirewallDriver): # offload case. In case the explicitly_egress_direct is used the # pipeline don't contain action NORMAL so we don't have flood rule # issue. - actions = 'output:{:d}'.format(port.ofport) + actions = f'output:{port.ofport:d}' if (self.int_br.br.is_hw_offload_enabled and not cfg.CONF.AGENT.explicitly_egress_direct): - actions = 'mod_vlan_vid:{:d},normal'.format(port.vlan_tag) + actions = f'mod_vlan_vid:{port.vlan_tag:d},normal' # Allow established and related connections for state in (ovsfw_consts.OF_STATE_ESTABLISHED_REPLY, ovsfw_consts.OF_STATE_RELATED): diff --git a/neutron/agent/linux/openvswitch_firewall/iptables.py b/neutron/agent/linux/openvswitch_firewall/iptables.py index f4e3beed93d..4cd913ad138 100644 --- a/neutron/agent/linux/openvswitch_firewall/iptables.py +++ b/neutron/agent/linux/openvswitch_firewall/iptables.py @@ -40,7 +40,7 @@ def is_bridge_cleaned(bridge): return other_config.get(Helper.CLEANED_METADATA, '').lower() == 'true' -class Helper(object): +class Helper: """Helper to avoid loading firewall driver. 
The main purpose is to avoid loading iptables driver for cases where no diff --git a/neutron/agent/linux/openvswitch_firewall/rules.py b/neutron/agent/linux/openvswitch_firewall/rules.py index 7134aee34ad..99b3dc45830 100644 --- a/neutron/agent/linux/openvswitch_firewall/rules.py +++ b/neutron/agent/linux/openvswitch_firewall/rules.py @@ -210,7 +210,7 @@ def populate_flow_common(direction, flow_template, port): """Initialize common flow fields.""" if direction == n_consts.INGRESS_DIRECTION: flow_template['table'] = ovs_consts.RULES_INGRESS_TABLE - flow_template['actions'] = "output:{:d}".format(port.ofport) + flow_template['actions'] = f"output:{port.ofport:d}" elif direction == n_consts.EGRESS_DIRECTION: flow_template['table'] = ovs_consts.RULES_EGRESS_TABLE # Traffic can be both ingress and egress, check that no ingress rules @@ -241,10 +241,10 @@ def create_port_range_flows(flow_template, rule): if protocol is None: return [] flows = [] - src_port_match = '{:s}_src'.format(protocol) + src_port_match = f'{protocol:s}_src' src_port_min = rule.get('source_port_range_min') src_port_max = rule.get('source_port_range_max') - dst_port_match = '{:s}_dst'.format(protocol) + dst_port_match = f'{protocol:s}_dst' dst_port_min = rule.get('port_range_min') dst_port_max = rule.get('port_range_max') diff --git a/neutron/agent/linux/pd.py b/neutron/agent/linux/pd.py index c5c3117509b..2428aaf13a7 100644 --- a/neutron/agent/linux/pd.py +++ b/neutron/agent/linux/pd.py @@ -32,7 +32,7 @@ from neutron.common import utils LOG = logging.getLogger(__name__) -class PrefixDelegation(object): +class PrefixDelegation: def __init__(self, context, pmon, intf_driver, notifier, pd_update_cb, agent_conf): self.context = context @@ -394,7 +394,7 @@ def update_router(resource, event, l3_agent, payload): router['ns_name'] = updated_router.get_gw_ns_name() -class PDInfo(object): +class PDInfo: """A class to simplify storing and passing of information relevant to Prefix Delegation operations for a given subnet. 
""" diff --git a/neutron/agent/linux/pd_driver.py b/neutron/agent/linux/pd_driver.py index 6409f9398e2..db934b931b1 100644 --- a/neutron/agent/linux/pd_driver.py +++ b/neutron/agent/linux/pd_driver.py @@ -20,7 +20,7 @@ from neutron.conf.agent import common as agent_conf agent_conf.register_pddriver_opts() -class PDDriverBase(object, metaclass=abc.ABCMeta): +class PDDriverBase(metaclass=abc.ABCMeta): def __init__(self, router_id, subnet_id, ri_ifname): self.router_id = router_id diff --git a/neutron/agent/linux/ra.py b/neutron/agent/linux/ra.py index 270e033bc47..959a9b66d82 100644 --- a/neutron/agent/linux/ra.py +++ b/neutron/agent/linux/ra.py @@ -80,7 +80,7 @@ CONFIG_TEMPLATE = jinja2.Template("""interface {{ interface_name }} """) -class DaemonMonitor(object): +class DaemonMonitor: """Manage the data and state of an radvd process.""" def __init__(self, router_id, router_ns, process_monitor, dev_name_helper, diff --git a/neutron/agent/linux/tc_lib.py b/neutron/agent/linux/tc_lib.py index 5844a24ac8d..b6ece9ee5a5 100644 --- a/neutron/agent/linux/tc_lib.py +++ b/neutron/agent/linux/tc_lib.py @@ -203,7 +203,7 @@ class TcCommand(ip_lib.IPDevice): def __init__(self, name, kernel_hz, namespace=None): if kernel_hz <= 0: raise InvalidKernelHzValue(value=kernel_hz) - super(TcCommand, self).__init__(name, namespace=namespace) + super().__init__(name, namespace=namespace) self.kernel_hz = kernel_hz @staticmethod diff --git a/neutron/agent/linux/utils.py b/neutron/agent/linux/utils.py index 06492cc998d..a128fd74ed6 100644 --- a/neutron/agent/linux/utils.py +++ b/neutron/agent/linux/utils.py @@ -43,7 +43,7 @@ from neutron.privileged.agent.linux import utils as priv_utils LOG = logging.getLogger(__name__) -class RootwrapDaemonHelper(object): +class RootwrapDaemonHelper: __client = None __lock = threading.Lock() @@ -237,18 +237,18 @@ def _get_conf_base(cfg_root, uuid, ensure_conf_dir): def get_conf_file_name(cfg_root, uuid, cfg_file, ensure_conf_dir=False): """Returns the file name for a given kind of config file.""" conf_base = _get_conf_base(cfg_root, uuid, ensure_conf_dir) - return "%s.%s" % (conf_base, cfg_file) + return "{}.{}".format(conf_base, cfg_file) def get_value_from_file(filename, converter=None): try: - with open(filename, 'r') as f: + with open(filename) as f: try: return converter(f.read()) if converter else f.read() except ValueError: LOG.error('Unable to convert value in %s', filename) - except IOError as error: + except OSError as error: LOG.debug('Unable to access %(filename)s; Error: %(error)s', {'filename': filename, 'error': error}) @@ -318,9 +318,9 @@ def get_cmdline_from_pid(pid): # NOTE(jh): Even after the above check, the process may terminate # before the open below happens try: - with open('/proc/%s/cmdline' % pid, 'r') as f: + with open('/proc/%s/cmdline' % pid) as f: cmdline = f.readline().split('\0')[:-1] - except IOError: + except OSError: return [] # NOTE(slaweq): sometimes it may happen that values in @@ -466,8 +466,8 @@ class UnixDomainWSGIServer(wsgi.Server): self._socket = None self._launcher = None self._server = None - super(UnixDomainWSGIServer, self).__init__(name, disable_ssl=True, - num_threads=num_threads) + super().__init__(name, disable_ssl=True, + num_threads=num_threads) def start(self, application, file_socket, workers, backlog, mode=None): self._socket = eventlet.listen(file_socket, diff --git a/neutron/agent/metadata/driver_base.py b/neutron/agent/metadata/driver_base.py index 6be9d129d39..42cbabd323c 100644 --- a/neutron/agent/metadata/driver_base.py 
+++ b/neutron/agent/metadata/driver_base.py @@ -46,7 +46,7 @@ listen listener """ -class HaproxyConfiguratorBase(object, metaclass=abc.ABCMeta): +class HaproxyConfiguratorBase(metaclass=abc.ABCMeta): PROXY_CONFIG_DIR = None HEADER_CONFIG_TEMPLATE = None @@ -75,7 +75,7 @@ class HaproxyConfiguratorBase(object, metaclass=abc.ABCMeta): # starts with "haproxy" then things will get logged to # /var/log/haproxy.log on Debian distros, instead of to syslog. uuid = network_id or router_id - self.log_tag = "haproxy-{}-{}".format(METADATA_SERVICE_NAME, uuid) + self.log_tag = f"haproxy-{METADATA_SERVICE_NAME}-{uuid}" self._haproxy_cfg = '' self._resource_id = None self._create_config() @@ -129,7 +129,7 @@ class HaproxyConfiguratorBase(object, metaclass=abc.ABCMeta): } if self.host_v6 and self.bind_interface: cfg_info['bind_v6_line'] = ( - 'bind %s:%s interface %s' % ( + 'bind {}:{} interface {}'.format( self.host_v6, self.port, self.bind_interface) ) # If using the network ID, delete any spurious router ID that might @@ -198,7 +198,7 @@ class HaproxyConfiguratorBase(object, metaclass=abc.ABCMeta): linux_utils.delete_if_exists(cfg_path, run_as_root=True) -class MetadataDriverBase(object, metaclass=abc.ABCMeta): +class MetadataDriverBase(metaclass=abc.ABCMeta): monitors = {} @staticmethod diff --git a/neutron/agent/metadata/proxy_base.py b/neutron/agent/metadata/proxy_base.py index 297ddce74f4..8513e472292 100644 --- a/neutron/agent/metadata/proxy_base.py +++ b/neutron/agent/metadata/proxy_base.py @@ -39,7 +39,7 @@ MODE_MAP = { } -class MetadataProxyHandlerBase(object, metaclass=abc.ABCMeta): +class MetadataProxyHandlerBase(metaclass=abc.ABCMeta): NETWORK_ID_HEADER = None ROUTER_ID_HEADER = None @@ -196,7 +196,7 @@ class MetadataProxyHandlerBase(object, metaclass=abc.ABCMeta): resp.status_code) -class UnixDomainMetadataProxyBase(object, metaclass=abc.ABCMeta): +class UnixDomainMetadataProxyBase(metaclass=abc.ABCMeta): def __init__(self, conf): self.conf = conf diff --git a/neutron/agent/ovn/extensions/extension_manager.py b/neutron/agent/ovn/extensions/extension_manager.py index 2947200d68e..d427b0f4d66 100644 --- a/neutron/agent/ovn/extensions/extension_manager.py +++ b/neutron/agent/ovn/extensions/extension_manager.py @@ -113,7 +113,7 @@ class OVNAgentExtension(extension.AgentExtension, metaclass=abc.ABCMeta): pass -class OVNAgentExtensionAPI(object): +class OVNAgentExtensionAPI: """Implements the OVN Neutron Agent API""" def __init__(self): diff --git a/neutron/agent/ovn/metadata/agent.py b/neutron/agent/ovn/metadata/agent.py index f8eb56d0c19..06064abe7f5 100644 --- a/neutron/agent/ovn/metadata/agent.py +++ b/neutron/agent/ovn/metadata/agent.py @@ -335,7 +335,7 @@ class SbGlobalUpdateEvent(_OVNExtensionEvent, row_event.RowEvent): def __init__(self, agent): table = 'SB_Global' events = (self.ROW_UPDATE,) - super(SbGlobalUpdateEvent, self).__init__(events, table, None) + super().__init__(events, table, None) self._agent = agent self.event_name = self.__class__.__name__ self.first_run = True @@ -366,7 +366,7 @@ class SbGlobalUpdateEvent(_OVNExtensionEvent, row_event.RowEvent): timer.start() -class MetadataAgent(object): +class MetadataAgent: def __init__(self, conf): self._conf = conf @@ -531,11 +531,11 @@ class MetadataAgent(object): ns.decode('utf-8') if isinstance(ns, bytes) else ns for ns in ip_lib.list_network_namespaces()) net_port_bindings = self.get_networks_port_bindings() - metadata_namespaces = set( + metadata_namespaces = { self._get_namespace_name( 
ovn_utils.get_network_name_from_datapath(datapath)) for datapath in (pb.datapath for pb in net_port_bindings) - ) + } unused_namespaces = [ns for ns in system_namespaces if ns.startswith(NS_PREFIX) and ns not in metadata_namespaces] diff --git a/neutron/agent/ovn/metadata/ovsdb.py b/neutron/agent/ovn/metadata/ovsdb.py index 0ffd676abcd..402f0f45f36 100644 --- a/neutron/agent/ovn/metadata/ovsdb.py +++ b/neutron/agent/ovn/metadata/ovsdb.py @@ -41,11 +41,11 @@ class MetadataAgentOvnSbIdl(ovsdb_monitor.OvnIdl): for table in tables: helper.register_table(table) try: - super(MetadataAgentOvnSbIdl, self).__init__( + super().__init__( None, connection_string, helper, leader_only=False) except TypeError: # TODO(twilson) We can remove this when we require ovs>=2.12.0 - super(MetadataAgentOvnSbIdl, self).__init__( + super().__init__( None, connection_string, helper) if chassis: for table in set(tables).intersection({'Chassis', @@ -69,7 +69,7 @@ class MetadataAgentOvnSbIdl(ovsdb_monitor.OvnIdl): pass -class MetadataAgentOvsIdl(object): +class MetadataAgentOvsIdl: def start(self): connection_string = config.cfg.CONF.ovs.ovsdb_connection diff --git a/neutron/agent/ovsdb/impl_idl.py b/neutron/agent/ovsdb/impl_idl.py index 3f82abcc1ee..30ba9e9d83a 100644 --- a/neutron/agent/ovsdb/impl_idl.py +++ b/neutron/agent/ovsdb/impl_idl.py @@ -43,7 +43,7 @@ def api_factory(): class OvsCleanup(command.BaseCommand): def __init__(self, api, bridge, all_ports=False): - super(OvsCleanup, self).__init__(api) + super().__init__(api) self.bridge = bridge self.all_ports = all_ports diff --git a/neutron/agent/ovsdb/native/connection.py b/neutron/agent/ovsdb/native/connection.py index dc22ffdde14..6e03bd0d4cf 100644 --- a/neutron/agent/ovsdb/native/connection.py +++ b/neutron/agent/ovsdb/native/connection.py @@ -64,8 +64,8 @@ class BridgeCreateEvent(idl_event.RowEvent): def __init__(self, agent): self.agent = agent table = 'Bridge' - super(BridgeCreateEvent, self).__init__((self.ROW_CREATE, ), - table, None) + super().__init__((self.ROW_CREATE, ), + table, None) self.event_name = 'BridgeCreateEvent' def run(self, event, row, old): @@ -83,7 +83,7 @@ class OvsIdl(idl.Idl): configure_ssl_conn() helper = self._get_ovsdb_helper(self._ovsdb_connection) helper.register_all() - super(OvsIdl, self).__init__(self._ovsdb_connection, helper) + super().__init__(self._ovsdb_connection, helper) self.notify_handler = ovsdb_event.RowEventHandler() @tenacity.retry(wait=tenacity.wait_exponential(multiplier=0.02), @@ -106,7 +106,7 @@ class OvsIdl(idl.Idl): class OvsIdlMonitor(OvsIdl): def __init__(self): - super(OvsIdlMonitor, self).__init__() + super().__init__() self._lock = threading.Lock() self._bridges_to_monitor = [] self._bridges_added_list = [] diff --git a/neutron/agent/resource_cache.py b/neutron/agent/resource_cache.py index 0a4ea1fb6bf..265d164d619 100644 --- a/neutron/agent/resource_cache.py +++ b/neutron/agent/resource_cache.py @@ -28,7 +28,7 @@ LOG = logging.getLogger(__name__) objects.register_objects() -class RemoteResourceCache(object): +class RemoteResourceCache: """Retrieves and stashes logical resources in their OVO format. This is currently only compatible with OVO objects that have an ID. @@ -233,7 +233,7 @@ class RemoteResourceCache(object): return changed -class RemoteResourceWatcher(object): +class RemoteResourceWatcher: """Converts RPC callback notifications to local registry notifications. 
This allows a constructor to listen for RPC callbacks for a given diff --git a/neutron/agent/rpc.py b/neutron/agent/rpc.py index 28bb07559e2..61b71dea27c 100644 --- a/neutron/agent/rpc.py +++ b/neutron/agent/rpc.py @@ -63,7 +63,7 @@ def create_consumers(endpoints, prefix, topic_details, start_listening=True): topic_name = topics.get_topic_name(prefix, topic, operation) connection.create_consumer(topic_name, endpoints, fanout=True) if node_name: - node_topic_name = '%s.%s' % (topic_name, node_name) + node_topic_name = '{}.{}'.format(topic_name, node_name) connection.create_consumer(node_topic_name, endpoints, fanout=False) @@ -72,7 +72,7 @@ def create_consumers(endpoints, prefix, topic_details, start_listening=True): return connection -class PluginReportStateAPI(object): +class PluginReportStateAPI: """RPC client used to report state back to plugin. This class implements the client side of an rpc interface. The server side @@ -105,7 +105,7 @@ class PluginReportStateAPI(object): return method(context, 'report_state', **kwargs) -class PluginApi(object): +class PluginApi: """Agent side of the rpc API. API version history: @@ -223,7 +223,7 @@ class CacheBackedPluginApi(PluginApi): resources.ADDRESSGROUP] def __init__(self, *args, **kwargs): - super(CacheBackedPluginApi, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.remote_resource_cache = None self._create_cache_for_l2_agent() @@ -280,7 +280,7 @@ class CacheBackedPluginApi(PluginApi): """ is_delete = event == callback_events.AFTER_DELETE suffix = 'delete' if is_delete else 'update' - method = "%s_%s" % (rtype, suffix) + method = "{}_{}".format(rtype, suffix) host_with_activation = None host_with_deactivation = None if is_delete or rtype != callback_resources.PORT: diff --git a/neutron/agent/securitygroups_rpc.py b/neutron/agent/securitygroups_rpc.py index b1ae0204462..d11a56e07d1 100644 --- a/neutron/agent/securitygroups_rpc.py +++ b/neutron/agent/securitygroups_rpc.py @@ -66,7 +66,7 @@ def disable_security_group_extension_by_config(aliases): _disable_extension(sg_rules_default_sg_def.ALIAS, aliases) -class SecurityGroupAgentRpc(object): +class SecurityGroupAgentRpc: """Enables SecurityGroup agent support in agent implementations.""" def __init__(self, context, plugin_rpc, local_vlan_map=None, diff --git a/neutron/api/api_common.py b/neutron/api/api_common.py index 095f59b93b2..caae2736783 100644 --- a/neutron/api/api_common.py +++ b/neutron/api/api_common.py @@ -126,8 +126,8 @@ def get_previous_link(request, items, id_key): marker = items[0][id_key] params['marker'] = marker params['page_reverse'] = True - return "%s?%s" % (prepare_url(get_path_url(request)), - urllib.parse.urlencode(params)) + return "{}?{}".format(prepare_url(get_path_url(request)), + urllib.parse.urlencode(params)) def get_next_link(request, items, id_key): @@ -137,8 +137,8 @@ def get_next_link(request, items, id_key): marker = items[-1][id_key] params['marker'] = marker params.pop('page_reverse', None) - return "%s?%s" % (prepare_url(get_path_url(request)), - urllib.parse.urlencode(params)) + return "{}?{}".format(prepare_url(get_path_url(request)), + urllib.parse.urlencode(params)) def prepare_url(orig_url): @@ -233,8 +233,8 @@ def get_sorts(request, attr_info): msg = _("The number of sort_keys and sort_dirs must be same") raise exc.HTTPBadRequest(explanation=msg) valid_dirs = [constants.SORT_DIRECTION_ASC, constants.SORT_DIRECTION_DESC] - valid_sort_keys = set(attr for attr, schema in attr_info.items() - if schema.get('is_sort_key', False)) + 
valid_sort_keys = {attr for attr, schema in attr_info.items() + if schema.get('is_sort_key', False)} absent_keys = [x for x in sort_keys if x not in valid_sort_keys] if absent_keys: msg = _("%s is invalid attribute for sort_keys") % absent_keys @@ -291,7 +291,7 @@ def is_filter_validation_supported(plugin): return getattr(plugin, filter_validation_attr_name, False) -class PaginationHelper(object): +class PaginationHelper: def __init__(self, request, primary_key='id'): self.request = request @@ -313,7 +313,7 @@ class PaginationHelper(object): class PaginationEmulatedHelper(PaginationHelper): def __init__(self, request, primary_key='id'): - super(PaginationEmulatedHelper, self).__init__(request, primary_key) + super().__init__(request, primary_key) self.limit, self.marker = get_limit_and_marker(request) self.page_reverse = get_page_reverse(request) @@ -375,7 +375,7 @@ class NoPaginationHelper(PaginationHelper): pass -class SortingHelper(object): +class SortingHelper: def __init__(self, request, attr_info): pass @@ -393,7 +393,7 @@ class SortingHelper(object): class SortingEmulatedHelper(SortingHelper): def __init__(self, request, attr_info): - super(SortingEmulatedHelper, self).__init__(request, attr_info) + super().__init__(request, attr_info) self.sort_dict = get_sorts(request, attr_info) def update_fields(self, original_fields, fields_to_add): @@ -450,8 +450,8 @@ def convert_exception_to_http_exc(e, faults, language): # all error codes are the same so we can maintain the code # and just concatenate the bodies joined_msg = "\n".join( - (jsonutils.loads(c.body)['NeutronError']['message'] - for c in converted_exceptions)) + jsonutils.loads(c.body)['NeutronError']['message'] + for c in converted_exceptions) new_body = jsonutils.loads(converted_exceptions[0].body) new_body['NeutronError']['message'] = joined_msg converted_exceptions[0].body = serializer.serialize(new_body) @@ -463,7 +463,7 @@ def convert_exception_to_http_exc(e, faults, language): inner_error_strings = [] for c in converted_exceptions: c_body = jsonutils.loads(c.body) - err = ('HTTP %s %s: %s' % ( + err = ('HTTP {} {}: {}'.format( c.code, c_body['NeutronError']['type'], c_body['NeutronError']['message'])) inner_error_strings.append(err) diff --git a/neutron/api/extensions.py b/neutron/api/extensions.py index 9c6db36be14..9250591e0dc 100644 --- a/neutron/api/extensions.py +++ b/neutron/api/extensions.py @@ -157,7 +157,7 @@ class ExtensionMiddleware(base.ConfigurableMiddleware): resource.collection) for action, method in resource.collection_actions.items(): conditions = dict(method=[method]) - path = "/%s/%s" % (resource.collection, action) + path = "/{}/{}".format(resource.collection, action) with mapper.submapper(controller=resource.controller, action=action, path_prefix=path_prefix, @@ -206,7 +206,7 @@ class ExtensionMiddleware(base.ConfigurableMiddleware): self._router = routes.middleware.RoutesMiddleware(self._dispatch, mapper, singleton=False) - super(ExtensionMiddleware, self).__init__(application) + super().__init__(application) @classmethod def factory(cls, global_config, **local_config): @@ -282,7 +282,7 @@ def plugin_aware_extension_middleware_factory(global_config, **local_config): return _factory -class ExtensionManager(object): +class ExtensionManager: """Load extensions from the configured extension path. 
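The get_sorts() hunk that closes just above replaces set(<generator>) with a set comprehension; the result is the same set of sortable attribute names. A tiny sketch with a made-up attr_info mapping, not the real Neutron attribute map:

    attr_info = {
        'name': {'is_sort_key': True},
        'status': {'is_sort_key': True},
        'description': {},
    }
    valid_sort_keys = {attr for attr, schema in attr_info.items()
                       if schema.get('is_sort_key', False)}
    assert valid_sort_keys == {'name', 'status'}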
See tests/unit/extensions/foxinsocks.py for an @@ -487,13 +487,12 @@ class PluginAwareExtensionManager(ExtensionManager): def __init__(self, path, plugins): self.plugins = plugins - super(PluginAwareExtensionManager, self).__init__(path) + super().__init__(path) self.check_if_plugin_extensions_loaded() def _check_extension(self, extension): """Check if an extension is supported by any plugin.""" - extension_is_valid = super(PluginAwareExtensionManager, - self)._check_extension(extension) + extension_is_valid = super()._check_extension(extension) if not extension_is_valid: return False @@ -572,7 +571,7 @@ class PluginAwareExtensionManager(ExtensionManager): extensions=list(missing_aliases)) -class RequestExtension(object): +class RequestExtension: """Extend requests and responses of core Neutron OpenStack API controllers. Provide a way to add data to responses and handle custom request data @@ -583,10 +582,10 @@ class RequestExtension(object): self.url_route = url_route self.handler = handler self.conditions = dict(method=[method]) - self.key = "%s-%s" % (method, url_route) + self.key = "{}-{}".format(method, url_route) -class ActionExtension(object): +class ActionExtension: """Add custom actions to core Neutron OpenStack API controllers.""" def __init__(self, collection, action_name, handler): @@ -595,7 +594,7 @@ class ActionExtension(object): self.handler = handler -class ResourceExtension(object): +class ResourceExtension: """Add top level resources to the OpenStack API in Neutron.""" def __init__(self, collection, controller, parent=None, path_prefix="", diff --git a/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py b/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py index 41f2ca82d21..35a697aa380 100644 --- a/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py +++ b/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py @@ -65,7 +65,7 @@ METHOD_PRIORITY_MAP = { LOG = logging.getLogger(__name__) -class DhcpAgentNotifyAPI(object): +class DhcpAgentNotifyAPI: """API for plugin to notify DHCP agent. This class implements the client side of an rpc interface. 
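The class Foo(object) -> class Foo: rewrites above (ExtensionManager, RequestExtension, ActionExtension, ResourceExtension and the rest) are behaviour-preserving on Python 3, where every class is new-style and implicitly derives from object. An illustrative check, assuming nothing beyond the standard object model:

    class ResourceExample:          # no explicit (object) base
        pass

    assert ResourceExample.__mro__ == (ResourceExample, object)
    assert isinstance(ResourceExample(), object)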
The server side diff --git a/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py b/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py index 8bcbac918f8..2413609ecf4 100644 --- a/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py +++ b/neutron/api/rpc/agentnotifiers/l3_rpc_agent_api.py @@ -35,7 +35,7 @@ LOG = logging.getLogger(__name__) AGENT_NOTIFY_MAX_ATTEMPTS = 2 -class L3AgentNotifyAPI(object): +class L3AgentNotifyAPI: """API for plugin to notify L3 agent.""" def __init__(self, topic=topics.L3_AGENT): diff --git a/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py b/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py index fa491653960..e92045a2c0f 100644 --- a/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py +++ b/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py @@ -26,7 +26,7 @@ from neutron.db import agentschedulers_db LOG = logging.getLogger(__name__) -class MeteringAgentNotifyAPI(object): +class MeteringAgentNotifyAPI: """API for plugin to notify L3 metering agent.""" def __init__(self, topic=topics.METERING_AGENT): diff --git a/neutron/api/rpc/agentnotifiers/utils.py b/neutron/api/rpc/agentnotifiers/utils.py index dd004fc2576..60552b8c9f0 100644 --- a/neutron/api/rpc/agentnotifiers/utils.py +++ b/neutron/api/rpc/agentnotifiers/utils.py @@ -32,7 +32,7 @@ def _call_with_retry(max_attempts): def wrapper(f): def func_wrapper(*args, **kwargs): # (ivasilevskaya) think of a more informative data to log - action = '%(func)s' % {'func': getattr(f, '__name__', f)} + action = '{func}'.format(func=getattr(f, '__name__', f)) for attempt in range(1, max_attempts + 1): try: return f(*args, **kwargs) diff --git a/neutron/api/rpc/callbacks/resource_manager.py b/neutron/api/rpc/callbacks/resource_manager.py index 37d394c919c..d4eac88a91d 100644 --- a/neutron/api/rpc/callbacks/resource_manager.py +++ b/neutron/api/rpc/callbacks/resource_manager.py @@ -30,7 +30,7 @@ def _validate_resource_type(resource_type): raise exceptions.Invalid(element='resource', value=resource_type) -class ResourceCallbacksManager(object, metaclass=abc.ABCMeta): +class ResourceCallbacksManager(metaclass=abc.ABCMeta): """A callback system that allows information providers in a loose manner. """ @@ -39,10 +39,10 @@ class ResourceCallbacksManager(object, metaclass=abc.ABCMeta): def __new__(cls, *args, **kwargs): if not cls._singleton: - return super(ResourceCallbacksManager, cls).__new__(cls) + return super().__new__(cls) if not hasattr(cls, '_instance'): - cls._instance = super(ResourceCallbacksManager, cls).__new__(cls) + cls._instance = super().__new__(cls) return cls._instance @abc.abstractmethod diff --git a/neutron/api/rpc/callbacks/version_manager.py b/neutron/api/rpc/callbacks/version_manager.py index 52b82ba973e..82e2810a3ac 100644 --- a/neutron/api/rpc/callbacks/version_manager.py +++ b/neutron/api/rpc/callbacks/version_manager.py @@ -44,7 +44,7 @@ AgentConsumer = collections.namedtuple('AgentConsumer', ['agent_type', AgentConsumer.__repr__ = lambda self: '%s@%s' % self -class ResourceConsumerTracker(object): +class ResourceConsumerTracker: """Class passed down to collect consumer's resource versions. 
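In the resource_manager.py hunk above, the explicit object base is dropped while the metaclass=abc.ABCMeta keyword stays, and the singleton __new__ now calls the zero-argument super().__new__(cls). A condensed sketch of that shape (simplified; it is not the actual callbacks manager, which also tracks a _singleton flag):

    import abc

    class SingletonManager(metaclass=abc.ABCMeta):
        def __new__(cls, *args, **kwargs):
            if not hasattr(cls, '_instance'):
                # object.__new__ only needs the class here.
                cls._instance = super().__new__(cls)
            return cls._instance

    class ConcreteManager(SingletonManager):
        pass

    assert ConcreteManager() is ConcreteManager()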
This class is responsible for fetching the local versions of @@ -194,7 +194,7 @@ class ResourceConsumerTracker(object): self._versions = versions -class CachedResourceConsumerTracker(object): +class CachedResourceConsumerTracker: """This class takes care of the caching logic of versions.""" def __init__(self): diff --git a/neutron/api/rpc/handlers/dhcp_rpc.py b/neutron/api/rpc/handlers/dhcp_rpc.py index 2faadc600d7..fa2c768e869 100644 --- a/neutron/api/rpc/handlers/dhcp_rpc.py +++ b/neutron/api/rpc/handlers/dhcp_rpc.py @@ -43,7 +43,7 @@ from neutron.quota import resource_registry LOG = logging.getLogger(__name__) -class DhcpRpcCallback(object): +class DhcpRpcCallback: """DHCP agent RPC callback in plugin implementations. This class implements the server side of an rpc interface. The client diff --git a/neutron/api/rpc/handlers/dvr_rpc.py b/neutron/api/rpc/handlers/dvr_rpc.py index d2807c94f44..2e2248039d4 100644 --- a/neutron/api/rpc/handlers/dvr_rpc.py +++ b/neutron/api/rpc/handlers/dvr_rpc.py @@ -25,7 +25,7 @@ import oslo_messaging LOG = logging.getLogger(__name__) -class DVRServerRpcApi(object): +class DVRServerRpcApi: """Agent-side RPC (stub) for agent-to-plugin interaction. This class implements the client side of an rpc interface. The server side @@ -66,7 +66,7 @@ class DVRServerRpcApi(object): context, 'get_subnet_for_dvr', subnet=subnet, fixed_ips=fixed_ips) -class DVRServerRpcCallback(object): +class DVRServerRpcCallback: """Plugin-side RPC (implementation) for agent-to-plugin interaction. This class implements the server side of an rpc interface. The client side @@ -111,7 +111,7 @@ class DVRServerRpcCallback(object): context, subnet, fixed_ips=fixed_ips) -class DVRAgentRpcApiMixin(object): +class DVRAgentRpcApiMixin: """Plugin-side RPC (stub) for plugin-to-agent interaction.""" DVR_RPC_VERSION = "1.0" @@ -130,7 +130,7 @@ class DVRAgentRpcApiMixin(object): cctxt.cast(context, 'dvr_mac_address_update', dvr_macs=dvr_macs) -class DVRAgentRpcCallbackMixin(object): +class DVRAgentRpcCallbackMixin: """Agent-side RPC (implementation) for plugin-to-agent interaction.""" def dvr_mac_address_update(self, context, **kwargs): diff --git a/neutron/api/rpc/handlers/l3_rpc.py b/neutron/api/rpc/handlers/l3_rpc.py index aa622dfe963..a7801d8420e 100644 --- a/neutron/api/rpc/handlers/l3_rpc.py +++ b/neutron/api/rpc/handlers/l3_rpc.py @@ -31,7 +31,7 @@ from sqlalchemy import orm LOG = logging.getLogger(__name__) -class L3RpcCallback(object): +class L3RpcCallback: """L3 agent RPC callback in plugin implementations.""" # 1.0 L3PluginApi BASE_RPC_API_VERSION diff --git a/neutron/api/rpc/handlers/metadata_rpc.py b/neutron/api/rpc/handlers/metadata_rpc.py index ae1aa373ec4..7bf0d088dee 100644 --- a/neutron/api/rpc/handlers/metadata_rpc.py +++ b/neutron/api/rpc/handlers/metadata_rpc.py @@ -18,7 +18,7 @@ from neutron_lib.plugins import directory import oslo_messaging -class MetadataRpcCallback(object): +class MetadataRpcCallback: """Metadata agent RPC callback in plugin implementations. 
This class implements the server side of an rpc interface used by the diff --git a/neutron/api/rpc/handlers/resources_rpc.py b/neutron/api/rpc/handlers/resources_rpc.py index baaf1bba4cd..bcab3e29a2b 100644 --- a/neutron/api/rpc/handlers/resources_rpc.py +++ b/neutron/api/rpc/handlers/resources_rpc.py @@ -72,7 +72,7 @@ def resource_type_versioned_topic(resource_type, version=None): 'version': version or cls.VERSION} -class ResourcesPullRpcApi(object): +class ResourcesPullRpcApi: """Agent-side RPC (stub) for agent-to-plugin interaction. This class implements the client side of an rpc interface. The server side @@ -83,7 +83,7 @@ class ResourcesPullRpcApi(object): def __new__(cls): # make it a singleton if not hasattr(cls, '_instance'): - cls._instance = super(ResourcesPullRpcApi, cls).__new__(cls) + cls._instance = super().__new__(cls) target = oslo_messaging.Target( topic=topics.PLUGIN, version='1.1', namespace=constants.RPC_NAMESPACE_RESOURCES) @@ -116,7 +116,7 @@ class ResourcesPullRpcApi(object): for primitive in primitives] -class ResourcesPullRpcCallback(object): +class ResourcesPullRpcCallback: """Plugin-side RPC (implementation) for agent-to-plugin interaction. This class implements the server side of an rpc interface. The client side @@ -148,7 +148,7 @@ class ResourcesPullRpcCallback(object): **filter_kwargs)] -class ResourcesPushToServersRpcApi(object): +class ResourcesPushToServersRpcApi: """Publisher-side RPC (stub) for plugin-to-plugin fanout interaction. This class implements the client side of an rpc interface. The receiver @@ -173,7 +173,7 @@ class ResourcesPushToServersRpcApi(object): version_map=version_map) -class ResourcesPushToServerRpcCallback(object): +class ResourcesPushToServerRpcCallback: """Receiver-side RPC (implementation) for plugin-to-plugin interaction. This class implements the receiver side of an rpc interface. @@ -195,7 +195,7 @@ class ResourcesPushToServerRpcCallback(object): version_manager.update_versions(consumer_id, version_map) -class ResourcesPushRpcApi(object): +class ResourcesPushRpcApi: """Plugin-side RPC for plugin-to-agents interaction. This interface is designed to push versioned object updates to interested @@ -237,7 +237,7 @@ class ResourcesPushRpcApi(object): LOG.debug( "Pushing event %s for resources: %s", event_type, {t: - ["ID=%s,revision_number=%s" % ( + ["ID={},revision_number={}".format( getattr(obj, 'id', None), getattr(obj, 'revision_number', None)) for obj in resources_by_type[t]] @@ -262,7 +262,7 @@ class ResourcesPushRpcApi(object): event_type=event_type) -class ResourcesPushRpcCallback(object): +class ResourcesPushRpcCallback: """Agent-side RPC for plugin-to-agents interaction. This class implements the receiver for notification about versioned objects diff --git a/neutron/api/rpc/handlers/securitygroups_rpc.py b/neutron/api/rpc/handlers/securitygroups_rpc.py index 752843a2776..5492258230b 100644 --- a/neutron/api/rpc/handlers/securitygroups_rpc.py +++ b/neutron/api/rpc/handlers/securitygroups_rpc.py @@ -33,7 +33,7 @@ from neutron.db import securitygroups_rpc_base as sg_rpc_base LOG = logging.getLogger(__name__) -class SecurityGroupServerRpcApi(object): +class SecurityGroupServerRpcApi: """RPC client for security group methods in the plugin. This class implements the client side of an rpc interface. 
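Most string changes in these hunks, like the ID={},revision_number={} line in ResourcesPushRpcApi above, swap old-style % interpolation for str.format(); the rendered text is identical. A sketch with placeholder values:

    obj_id, rev = 'port-1', 7
    old_style = "ID=%s,revision_number=%s" % (obj_id, rev)
    new_style = "ID={},revision_number={}".format(obj_id, rev)
    assert old_style == new_style == "ID=port-1,revision_number=7"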
This interface @@ -65,7 +65,7 @@ class SecurityGroupServerRpcApi(object): call_version=call_version) -class SecurityGroupServerRpcCallback(object): +class SecurityGroupServerRpcCallback: """Callback for SecurityGroup agent RPC in plugin implementations. This class implements the server side of an rpc interface. The client side @@ -89,11 +89,11 @@ class SecurityGroupServerRpcCallback(object): return directory.get_plugin() def _get_devices_info(self, context, devices): - return dict( - (port['id'], port) + return { + port['id']: port for port in self.plugin.get_ports_from_devices(context, devices) if port and not net.is_port_trusted(port) - ) + } def security_group_rules_for_devices(self, context, **kwargs): """Callback method to return security group rules for each port. @@ -141,9 +141,9 @@ class SecurityGroupServerRpcCallback(object): for sg_id in sg_ids: member_ips = sg_member_ips.get(sg_id, {}) ipv4_ips = member_ips.get("IPv4", set()) - comp_ipv4_ips = set([ip for ip, _mac in ipv4_ips]) + comp_ipv4_ips = {ip for ip, _mac in ipv4_ips} ipv6_ips = member_ips.get("IPv6", set()) - comp_ipv6_ips = set([ip for ip, _mac in ipv6_ips]) + comp_ipv6_ips = {ip for ip, _mac in ipv6_ips} comp_ips = {"IPv4": comp_ipv4_ips, "IPv6": comp_ipv6_ips} sg_member_ips[sg_id] = comp_ips @@ -151,7 +151,7 @@ class SecurityGroupServerRpcCallback(object): return sg_info -class SecurityGroupAgentRpcApiMixin(object): +class SecurityGroupAgentRpcApiMixin: """RPC client for security group methods to the agent. This class implements the client side of an rpc interface. This interface @@ -193,7 +193,7 @@ class SecurityGroupAgentRpcApiMixin(object): security_groups=security_groups) -class SecurityGroupAgentRpcCallbackMixin(object): +class SecurityGroupAgentRpcCallbackMixin: """A mix-in that enable SecurityGroup support in agent implementations. This class implements the server side of an rpc interface. 
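The _get_devices_info() hunk above turns dict(<generator of (key, value) pairs>) into a dict comprehension with the same filtering. A sketch with fake port dicts and a stand-in 'trusted' flag for the real net.is_port_trusted() check:

    ports = [{'id': 'p1', 'trusted': False},
             {'id': 'p2', 'trusted': True},
             None]
    devices = {port['id']: port
               for port in ports
               if port and not port['trusted']}
    assert list(devices) == ['p1']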
The client side @@ -281,8 +281,8 @@ class SecurityGroupServerAPIShim(sg_rpc_base.SecurityGroupInfoAPIMixin): def get_secgroup_ids_for_address_group(self, address_group_id): filters = {'remote_address_group_id': (address_group_id, )} - return set([rule.security_group_id for rule in - self.rcache.get_resources('SecurityGroupRule', filters)]) + return {rule.security_group_id for rule in + self.rcache.get_resources('SecurityGroupRule', filters)} def _add_child_sg_rules(self, rtype, event, trigger, payload): # whenever we receive a full security group, add all child rules @@ -420,8 +420,8 @@ class SecurityGroupServerAPIShim(sg_rpc_base.SecurityGroupInfoAPIMixin): if not ports: return [] results = [] - sg_ids = set((sg_id for p in ports.values() - for sg_id in p['security_group_ids'])) + sg_ids = {sg_id for p in ports.values() + for sg_id in p['security_group_ids']} rules_by_sgid = collections.defaultdict(list) for sg_id in sg_ids: filters = {'security_group_id': (sg_id, )} @@ -434,8 +434,8 @@ class SecurityGroupServerAPIShim(sg_rpc_base.SecurityGroupInfoAPIMixin): return results def _select_sg_ids_for_ports(self, context, ports): - sg_ids = set((sg_id for p in ports.values() - for sg_id in p['security_group_ids'])) + sg_ids = {sg_id for p in ports.values() + for sg_id in p['security_group_ids']} return [(sg_id, ) for sg_id in sg_ids] def _get_sgs_stateful_flag(self, context, sg_ids): diff --git a/neutron/api/v2/base.py b/neutron/api/v2/base.py index 1684c02cdc9..6cba14cc70a 100644 --- a/neutron/api/v2/base.py +++ b/neutron/api/v2/base.py @@ -43,7 +43,7 @@ from neutron.quota import resource_registry LOG = logging.getLogger(__name__) -class Controller(object): +class Controller: LIST = 'list' SHOW = 'show' CREATE = 'create' @@ -129,12 +129,12 @@ class Controller(object): self._parent_id_name = None parent_part = '' self._plugin_handlers = { - self.LIST: 'get%s_%s' % (parent_part, self._collection), - self.SHOW: 'get%s_%s' % (parent_part, self._resource) + self.LIST: 'get{}_{}'.format(parent_part, self._collection), + self.SHOW: 'get{}_{}'.format(parent_part, self._resource) } for action in [self.CREATE, self.UPDATE, self.DELETE]: - self._plugin_handlers[action] = '%s%s_%s' % (action, parent_part, - self._resource) + self._plugin_handlers[action] = '{}{}_{}'.format( + action, parent_part, self._resource) def _get_primary_key(self, default_primary_key='id'): for key, value in self._attr_info.items(): @@ -176,8 +176,8 @@ class Controller(object): if attr_data and attr_data['is_visible']: if policy.check( context, - '%s:%s' % (self._plugin_handlers[self.SHOW], - attr_name), + '{}:{}'.format(self._plugin_handlers[self.SHOW], + attr_name), data, might_not_exist=True, pluralized=self._collection): @@ -824,13 +824,14 @@ class Controller(object): self.parent['member_name'] in service_const.EXT_PARENT_RESOURCE_MAPPING): resource_item.setdefault( - "%s_%s" % (constants.EXT_PARENT_PREFIX, - self._parent_id_name), + "{}_{}".format(constants.EXT_PARENT_PREFIX, + self._parent_id_name), parent_id) # If this func is called by create/update/delete, we just add. 
else: resource_item.setdefault( - "%s_%s" % (constants.EXT_PARENT_PREFIX, self._parent_id_name), + "{}_{}".format(constants.EXT_PARENT_PREFIX, + self._parent_id_name), parent_id) def _belongs_to_default_sg(self, request, resource_item): diff --git a/neutron/api/views/versions.py b/neutron/api/views/versions.py index d050394b009..972010ea98c 100644 --- a/neutron/api/views/versions.py +++ b/neutron/api/views/versions.py @@ -23,7 +23,7 @@ def get_view_builder(req): return ViewBuilder(base_url) -class ViewBuilder(object): +class ViewBuilder: def __init__(self, base_url): """Object initialization. diff --git a/neutron/api/wsgi.py b/neutron/api/wsgi.py index df1c6d6951c..5b0a59dd327 100644 --- a/neutron/api/wsgi.py +++ b/neutron/api/wsgi.py @@ -61,8 +61,8 @@ class WorkerService(neutron_worker.NeutronBaseWorker): """Wraps a worker to be handled by ProcessLauncher""" def __init__(self, service, application, set_proctitle, disable_ssl=False, worker_process_count=0, desc=None): - super(WorkerService, self).__init__(worker_process_count, - set_proctitle) + super().__init__(worker_process_count, + set_proctitle) self._service = service self._application = application @@ -72,7 +72,7 @@ class WorkerService(neutron_worker.NeutronBaseWorker): def start(self, desc=None): desc = desc or self.desc - super(WorkerService, self).start(desc=desc) + super().start(desc=desc) # When api worker is stopped it kills the eventlet wsgi server which # internally closes the wsgi server socket object. This server socket # object becomes not usable which leads to "Bad file descriptor" @@ -99,7 +99,7 @@ class WorkerService(neutron_worker.NeutronBaseWorker): config.reset_service() -class Server(object): +class Server: """Server class to manage multiple WSGI sockets and applications.""" def __init__(self, name, num_threads=None, disable_ssl=False): @@ -141,7 +141,7 @@ class Server(object): sock = eventlet.listen(bind_addr, backlog=backlog, family=family) - except socket.error as err: + except OSError as err: with excutils.save_and_reraise_exception() as ctxt: if err.errno == errno.EADDRINUSE: ctxt.reraise = False @@ -250,7 +250,7 @@ class Request(wsgi.Request): if len(parts) > 1: _format = parts[1] if _format in ['json']: - return 'application/{0}'.format(_format) + return f'application/{_format}' # Then look up content header type_from_header = self.get_content_type() @@ -296,7 +296,7 @@ class Request(wsgi.Request): return self.environ['neutron.context'] -class ActionDispatcher(object): +class ActionDispatcher: """Maps method name to local methods through action name.""" def dispatch(self, *args, **kwargs): @@ -338,7 +338,7 @@ class ResponseHeaderSerializer(ActionDispatcher): response.status_int = 200 -class ResponseSerializer(object): +class ResponseSerializer: """Encode the necessary pieces into a response object.""" def __init__(self, body_serializers=None, headers_serializer=None): @@ -411,7 +411,7 @@ class RequestHeadersDeserializer(ActionDispatcher): return {} -class RequestDeserializer(object): +class RequestDeserializer: """Break up a Request object into more useful pieces.""" def __init__(self, body_deserializers=None, headers_deserializer=None): @@ -498,7 +498,7 @@ class RequestDeserializer(object): return args -class Application(object): +class Application: """Base WSGI application wrapper. 
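Two Python 3 details from the wsgi.py hunks above: socket.error has been an alias of OSError since Python 3.3, so the except clause is unchanged in behaviour, and the content-type string becomes an f-string. A minimal sketch:

    import socket

    assert socket.error is OSError        # alias since Python 3.3

    _format = 'json'
    assert f'application/{_format}' == 'application/{0}'.format(_format)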
Subclasses need to implement __call__.""" @classmethod @@ -690,7 +690,7 @@ class Fault(webob.exc.HTTPException): # NOTE(salvatore-orlando): this class will go once the # extension API framework is updated -class Controller(object): +class Controller: """WSGI app that dispatched to methods. WSGI app that reads routing information supplied by RoutesMiddleware @@ -763,7 +763,7 @@ class Controller(object): # NOTE(salvatore-orlando): this class will go once the # extension API framework is updated -class Serializer(object): +class Serializer: """Serializes and deserializes dictionaries to certain MIME types.""" def __init__(self, metadata=None): diff --git a/neutron/cmd/destroy_patch_ports.py b/neutron/cmd/destroy_patch_ports.py index 9ff0bb0f802..a5af060dbc3 100644 --- a/neutron/cmd/destroy_patch_ports.py +++ b/neutron/cmd/destroy_patch_ports.py @@ -40,7 +40,7 @@ def get_patch_port_names(bridge_name): return int_if_name, phys_if_name -class PatchPortCleaner(object): +class PatchPortCleaner: def __init__(self, config): LOG.debug("Get OVS bridge mappings") mappings = helpers.parse_mappings(config.OVS.bridge_mappings) diff --git a/neutron/cmd/eventlet/agents/dhcp.py b/neutron/cmd/eventlet/agents/dhcp.py index 17f30aab0ca..38536f147d0 100644 --- a/neutron/cmd/eventlet/agents/dhcp.py +++ b/neutron/cmd/eventlet/agents/dhcp.py @@ -17,7 +17,7 @@ from neutron_lib import constants def main(): - proctitle = "%s (%s)" % ( + proctitle = "{} ({})".format( constants.AGENT_PROCESS_DHCP, setproctitle.getproctitle()) setproctitle.setproctitle(proctitle) diff --git a/neutron/cmd/eventlet/agents/l3.py b/neutron/cmd/eventlet/agents/l3.py index 29f0db73de0..5d46d771ae1 100644 --- a/neutron/cmd/eventlet/agents/l3.py +++ b/neutron/cmd/eventlet/agents/l3.py @@ -17,7 +17,7 @@ from neutron_lib import constants def main(): - proctitle = "%s (%s)" % ( + proctitle = "{} ({})".format( constants.AGENT_PROCESS_L3, setproctitle.getproctitle()) setproctitle.setproctitle(proctitle) diff --git a/neutron/cmd/eventlet/agents/metadata.py b/neutron/cmd/eventlet/agents/metadata.py index 58d49f0315d..de3d89f7375 100644 --- a/neutron/cmd/eventlet/agents/metadata.py +++ b/neutron/cmd/eventlet/agents/metadata.py @@ -17,7 +17,7 @@ from neutron_lib import constants def main(): - proctitle = "%s (%s)" % ( + proctitle = "{} ({})".format( constants.AGENT_PROCESS_METADATA, setproctitle.getproctitle()) setproctitle.setproctitle(proctitle) diff --git a/neutron/cmd/eventlet/agents/ovn_metadata.py b/neutron/cmd/eventlet/agents/ovn_metadata.py index 284829c5852..f5ce15da9f9 100644 --- a/neutron/cmd/eventlet/agents/ovn_metadata.py +++ b/neutron/cmd/eventlet/agents/ovn_metadata.py @@ -17,7 +17,7 @@ from neutron_lib import constants def main(): - proctitle = "%s (%s)" % ( + proctitle = "{} ({})".format( constants.AGENT_PROCESS_OVN_METADATA, setproctitle.getproctitle()) setproctitle.setproctitle(proctitle) diff --git a/neutron/cmd/eventlet/agents/ovn_neutron_agent.py b/neutron/cmd/eventlet/agents/ovn_neutron_agent.py index ce3424ecbc3..5a4bdc5f8f7 100644 --- a/neutron/cmd/eventlet/agents/ovn_neutron_agent.py +++ b/neutron/cmd/eventlet/agents/ovn_neutron_agent.py @@ -20,7 +20,7 @@ AGENT_PROCESS_OVN_NEUTRON_AGENT = 'neutron-ovn-agent' def main(): - proctitle = "%s (%s)" % (AGENT_PROCESS_OVN_NEUTRON_AGENT, - setproctitle.getproctitle()) + proctitle = "{} ({})".format(AGENT_PROCESS_OVN_NEUTRON_AGENT, + setproctitle.getproctitle()) setproctitle.setproctitle(proctitle) ovn_neutron_agent.main() diff --git 
a/neutron/cmd/eventlet/plugins/linuxbridge_neutron_agent.py b/neutron/cmd/eventlet/plugins/linuxbridge_neutron_agent.py index 04baac62d5c..7b34f73769d 100644 --- a/neutron/cmd/eventlet/plugins/linuxbridge_neutron_agent.py +++ b/neutron/cmd/eventlet/plugins/linuxbridge_neutron_agent.py @@ -21,7 +21,7 @@ from neutron_lib import constants def main(): - proctitle = "%s (%s)" % ( + proctitle = "{} ({})".format( constants.AGENT_PROCESS_LINUXBRIDGE, setproctitle.getproctitle()) setproctitle.setproctitle(proctitle) diff --git a/neutron/cmd/eventlet/plugins/macvtap_neutron_agent.py b/neutron/cmd/eventlet/plugins/macvtap_neutron_agent.py index 61b46df1960..5d888bf904b 100644 --- a/neutron/cmd/eventlet/plugins/macvtap_neutron_agent.py +++ b/neutron/cmd/eventlet/plugins/macvtap_neutron_agent.py @@ -20,7 +20,7 @@ from neutron_lib import constants def main(): - proctitle = "%s (%s)" % ( + proctitle = "{} ({})".format( constants.AGENT_PROCESS_MACVTAP, setproctitle.getproctitle()) setproctitle.setproctitle(proctitle) diff --git a/neutron/cmd/eventlet/plugins/ovs_neutron_agent.py b/neutron/cmd/eventlet/plugins/ovs_neutron_agent.py index 2e68fecb6ef..81ab9956f20 100644 --- a/neutron/cmd/eventlet/plugins/ovs_neutron_agent.py +++ b/neutron/cmd/eventlet/plugins/ovs_neutron_agent.py @@ -20,7 +20,7 @@ from neutron_lib import constants def main(): - proctitle = "%s (%s)" % ( + proctitle = "{} ({})".format( constants.AGENT_PROCESS_OVS, setproctitle.getproctitle()) setproctitle.setproctitle(proctitle) diff --git a/neutron/cmd/eventlet/plugins/sriov_nic_neutron_agent.py b/neutron/cmd/eventlet/plugins/sriov_nic_neutron_agent.py index a56ac24ef30..18be3fce978 100644 --- a/neutron/cmd/eventlet/plugins/sriov_nic_neutron_agent.py +++ b/neutron/cmd/eventlet/plugins/sriov_nic_neutron_agent.py @@ -20,7 +20,7 @@ from neutron_lib import constants def main(): - proctitle = "%s (%s)" % ( + proctitle = "{} ({})".format( constants.AGENT_PROCESS_NIC_SWITCH, setproctitle.getproctitle()) setproctitle.setproctitle(proctitle) diff --git a/neutron/cmd/eventlet/services/metering_agent.py b/neutron/cmd/eventlet/services/metering_agent.py index caa64422d4c..33b0ff7139a 100644 --- a/neutron/cmd/eventlet/services/metering_agent.py +++ b/neutron/cmd/eventlet/services/metering_agent.py @@ -17,7 +17,7 @@ from neutron_lib import constants def main(): - proctitle = "%s (%s)" % ( + proctitle = "{} ({})".format( constants.AGENT_PROCESS_METERING, setproctitle.getproctitle()) setproctitle.setproctitle(proctitle) diff --git a/neutron/cmd/netns_cleanup.py b/neutron/cmd/netns_cleanup.py index ac5ddfddd25..eda78319510 100644 --- a/neutron/cmd/netns_cleanup.py +++ b/neutron/cmd/netns_cleanup.py @@ -50,7 +50,7 @@ class PidsInNamespaceException(Exception): pass -class FakeDhcpPlugin(object): +class FakeDhcpPlugin: """Fake RPC plugin to bypass any RPC calls.""" def __getattribute__(self, name): def fake_method(*args): @@ -105,8 +105,8 @@ def eligible_for_deletion(conf, namespace, force=False): prefixes = NS_PREFIXES.get(conf.agent_type) else: prefixes = itertools.chain(*NS_PREFIXES.values()) - ns_mangling_pattern = '(%s%s)' % ('|'.join(prefixes), - constants.UUID_PATTERN) + ns_mangling_pattern = '({}{})'.format('|'.join(prefixes), + constants.UUID_PATTERN) # filter out namespaces without UUID as the name if not re.match(ns_mangling_pattern, namespace): diff --git a/neutron/cmd/ovn/neutron_ovn_db_sync_util.py b/neutron/cmd/ovn/neutron_ovn_db_sync_util.py index 9635114144a..fae637b5bc3 100644 --- a/neutron/cmd/ovn/neutron_ovn_db_sync_util.py +++ 
b/neutron/cmd/ovn/neutron_ovn_db_sync_util.py @@ -94,7 +94,7 @@ class OVNMechanismDriver(mech_driver.OVNMechanismDriver): self.ovn_client.delete_port(context.plugin_context, port['id']) -class AgentNotifierApi(object): +class AgentNotifierApi: """Default Agent Notifier class for ovn-db-sync-util. This class implements empty methods so that when creating resources in diff --git a/neutron/cmd/ovs_cleanup.py b/neutron/cmd/ovs_cleanup.py index 8c6e1dd2cbc..b694c3e3ee1 100644 --- a/neutron/cmd/ovs_cleanup.py +++ b/neutron/cmd/ovs_cleanup.py @@ -64,7 +64,7 @@ def main(): def do_main(conf): - configuration_bridges = set([conf.OVS.integration_bridge]) + configuration_bridges = {conf.OVS.integration_bridge} ovs = ovs_lib.BaseOVS() ovs_bridges = set(ovs.get_bridges()) available_configuration_bridges = configuration_bridges & ovs_bridges diff --git a/neutron/cmd/sanity/checks.py b/neutron/cmd/sanity/checks.py index c19610be333..8231aeee6e0 100644 --- a/neutron/cmd/sanity/checks.py +++ b/neutron/cmd/sanity/checks.py @@ -337,7 +337,7 @@ def bridge_firewalling_enabled(): return True -class KeepalivedIPv6Test(object): +class KeepalivedIPv6Test: def __init__(self, ha_port, gw_port, gw_vip, default_gw): l3_config.register_l3_agent_config_opts(l3_config.OPTS, cfg.CONF) self.ha_port = ha_port diff --git a/neutron/cmd/sanity_check.py b/neutron/cmd/sanity_check.py index e2e2347c4ab..6a40d316136 100644 --- a/neutron/cmd/sanity_check.py +++ b/neutron/cmd/sanity_check.py @@ -54,7 +54,7 @@ class BoolOptCallback(cfg.BoolOpt): if 'default' not in kwargs: kwargs['default'] = False self.callback = callback - super(BoolOptCallback, self).__init__(name, **kwargs) + super().__init__(name, **kwargs) def check_ovs_vxlan(): diff --git a/neutron/cmd/upgrade_checks/base.py b/neutron/cmd/upgrade_checks/base.py index b945cf3b59b..edfd967be94 100644 --- a/neutron/cmd/upgrade_checks/base.py +++ b/neutron/cmd/upgrade_checks/base.py @@ -15,7 +15,7 @@ import abc -class BaseChecks(object, metaclass=abc.ABCMeta): +class BaseChecks(metaclass=abc.ABCMeta): """Base class providing upgrade checks. diff --git a/neutron/common/_deprecate.py b/neutron/common/_deprecate.py index 18c66f507f8..7f6db5ad123 100644 --- a/neutron/common/_deprecate.py +++ b/neutron/common/_deprecate.py @@ -25,7 +25,7 @@ import debtcollector from neutron._i18n import _ -class _MovedGlobals(object): +class _MovedGlobals: """Override a module to deprecate moved globals. 
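The ovs_cleanup hunk above writes the one-element set with a literal instead of set([...]); the later intersection with the bridges reported by OVS is unaffected. A sketch with hypothetical bridge names:

    integration_bridge = 'br-int'
    configuration_bridges = {integration_bridge}   # same as set([integration_bridge])
    ovs_bridges = {'br-int', 'br-ex'}
    assert configuration_bridges & ovs_bridges == {'br-int'}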
This class is used when globals (attributes of a module) need to be @@ -109,12 +109,12 @@ class _MovedGlobals(object): new_module, new_name = self._mg__default_new_mod, name if new_module and new_name in vars(new_module): - old_location = '%s.%s' % (old_module.__name__, name) - new_location = '%s.%s' % (new_module.__name__, new_name) + old_location = '{}.{}'.format(old_module.__name__, name) + new_location = '{}.{}'.format(new_module.__name__, new_name) changed = 'renamed' if old_module == new_module else 'moved' debtcollector.deprecate( old_location, - message='%s to %s' % (changed, new_location), + message='{} to {}'.format(changed, new_location), stacklevel=4) return vars(new_module)[new_name] @@ -127,7 +127,7 @@ class _MovedGlobals(object): def __setattr__(self, name, val): if name.startswith('_mg__'): - return super(_MovedGlobals, self).__setattr__(name, val) + return super().__setattr__(name, val) self._mg__my_globals[name] = val def __delattr__(self, name): diff --git a/neutron/common/cache_utils.py b/neutron/common/cache_utils.py index b29931acaf2..c03c10d3d1e 100644 --- a/neutron/common/cache_utils.py +++ b/neutron/common/cache_utils.py @@ -56,7 +56,7 @@ def _get_memory_cache_region(expiration_time=5): return _get_cache_region(conf) -class cache_method_results(object): +class cache_method_results: """This decorator is intended for object methods only.""" def __init__(self, func): diff --git a/neutron/common/ipv6_utils.py b/neutron/common/ipv6_utils.py index ee715af0786..35b1caa0d4b 100644 --- a/neutron/common/ipv6_utils.py +++ b/neutron/common/ipv6_utils.py @@ -52,7 +52,7 @@ def valid_ipv6_url(host, port): square brackets always required in ipv6 URI. """ if netutils.is_valid_ipv6(host): - uri = '[%s]:%s' % (host, port) + uri = '[{}]:{}'.format(host, port) else: - uri = '%s:%s' % (host, port) + uri = '{}:{}'.format(host, port) return uri diff --git a/neutron/common/ovn/acl.py b/neutron/common/ovn/acl.py index 07a7aeb9c49..e62a354c058 100644 --- a/neutron/common/ovn/acl.py +++ b/neutron/common/ovn/acl.py @@ -63,8 +63,8 @@ def acl_direction(r, port=None, port_group=None): portdir = 'inport' if port: - return '%s == "%s"' % (portdir, port['id']) - return '%s == @%s' % (portdir, port_group) + return '{} == "{}"'.format(portdir, port['id']) + return '{} == @{}'.format(portdir, port_group) def acl_ethertype(r): @@ -86,7 +86,7 @@ def acl_remote_ip_prefix(r, ip_version): if not r['normalized_cidr']: return '' src_or_dst = 'src' if r['direction'] == const.INGRESS_DIRECTION else 'dst' - return ' && %s.%s == %s' % ( + return ' && {}.{} == {}'.format( ip_version, src_or_dst, r['normalized_cidr']) @@ -148,7 +148,7 @@ def add_acls_for_drop_port_group(pg_name): "name": [], "severity": [], "direction": direction, - "match": '%s == @%s && ip' % (p, pg_name)} + "match": '{} == @{} && ip'.format(p, pg_name)} acl_list.append(acl) return acl_list @@ -166,7 +166,7 @@ def drop_all_ip_traffic_for_port(port): "name": [], "severity": [], "direction": direction, - "match": '%s == "%s" && ip' % (p, port['id']), + "match": '{} == "{}" && ip'.format(p, port['id']), "external_ids": {'neutron:lport': port['id']}} acl_list.append(acl) return acl_list @@ -230,7 +230,7 @@ def acl_remote_group_id(r, ip_version): src_or_dst = 'src' if r['direction'] == const.INGRESS_DIRECTION else 'dst' addrset_name = utils.ovn_pg_addrset_name(r['remote_group_id'], ip_version) - return ' && %s.%s == $%s' % (ip_version, src_or_dst, addrset_name) + return ' && {}.{} == ${}'.format(ip_version, src_or_dst, addrset_name) def 
_add_sg_rule_acl_for_port_group(port_group, stateful, r): diff --git a/neutron/common/ovn/hash_ring_manager.py b/neutron/common/ovn/hash_ring_manager.py index 59d6107f4ad..28dfad25c5a 100644 --- a/neutron/common/ovn/hash_ring_manager.py +++ b/neutron/common/ovn/hash_ring_manager.py @@ -28,7 +28,7 @@ from neutron_lib import context LOG = log.getLogger(__name__) -class HashRingManager(object): +class HashRingManager: def __init__(self, group_name): self._hash_ring = None diff --git a/neutron/common/ovn/utils.py b/neutron/common/ovn/utils.py index 4f6fc929336..a4d82972aae 100644 --- a/neutron/common/ovn/utils.py +++ b/neutron/common/ovn/utils.py @@ -70,7 +70,7 @@ HAChassisGroupInfo = collections.namedtuple( 'ignore_chassis', 'external_ids']) -class OvsdbClientCommand(object): +class OvsdbClientCommand: _CONNECTION = 0 _PRIVATE_KEY = 1 _CERTIFICATE = 2 @@ -104,12 +104,12 @@ class OvsdbClientCommand(object): db = command[0] except IndexError: raise KeyError( - _("%s or %s schema must be specified in the command %s" % ( + _("{} or {} schema must be specified in the command {}".format( cls.OVN_Northbound, cls.OVN_Southbound, command))) if db not in (cls.OVN_Northbound, cls.OVN_Southbound): raise KeyError( - _("%s or %s schema must be specified in the command %s" % ( + _("{} or {} schema must be specified in the command {}".format( cls.OVN_Northbound, cls.OVN_Southbound, command))) cmd = ['ovsdb-client', @@ -202,7 +202,7 @@ def ovn_name(id): # is a UUID. If so then there will be no matches. # We prefix the UUID to enable us to use the Neutron UUID when # updating, deleting etc. - return "%s%s" % (constants.OVN_NAME_PREFIX, id) + return "{}{}".format(constants.OVN_NAME_PREFIX, id) def ovn_lrouter_port_name(id): @@ -249,7 +249,7 @@ def ovn_addrset_name(sg_id, ip_version): # as-- # with all '-' replaced with '_'. This replacement is necessary # because OVN doesn't support '-' in an address set name. - return ('as-%s-%s' % (ip_version, sg_id)).replace('-', '_') + return ('as-{}-{}'.format(ip_version, sg_id)).replace('-', '_') def ovn_pg_addrset_name(sg_id, ip_version): @@ -258,7 +258,7 @@ def ovn_pg_addrset_name(sg_id, ip_version): # pg-- # with all '-' replaced with '_'. This replacement is necessary # because OVN doesn't support '-' in an address set name. - return ('pg-%s-%s' % (sg_id, ip_version)).replace('-', '_') + return ('pg-{}-{}'.format(sg_id, ip_version)).replace('-', '_') def ovn_port_group_name(sg_id): @@ -666,7 +666,7 @@ def get_system_dns_resolvers(resolver_file=DNS_RESOLVER_FILE): if not os.path.exists(resolver_file): return resolvers - with open(resolver_file, 'r') as rconf: + with open(resolver_file) as rconf: for line in rconf.readlines(): if not line.startswith('nameserver'): continue @@ -910,7 +910,7 @@ def parse_ovn_lb_port_forwarding(ovn_rtr_lb_pfs): ovn_vips = ovn_lb.vips for vip, ips in ovn_vips.items(): for ip in ips.split(','): - fip_dict_proto.add("{} {}".format(vip, ip)) + fip_dict_proto.add(f"{vip} {ip}") fip_dict[protocol] = fip_dict_proto result[fip_id] = fip_dict return result @@ -1315,8 +1315,8 @@ def validate_port_binding_and_virtual_port( return fixed_ips = port.get('fixed_ips', []) - subnet_ids = set([fixed_ip['subnet_id'] for fixed_ip in fixed_ips - if 'subnet_id' in fixed_ip]) + subnet_ids = {fixed_ip['subnet_id'] for fixed_ip in fixed_ips + if 'subnet_id' in fixed_ip} if not subnet_ids: # If the port has no fixed_ips/subnets, it cannot be virtual. 
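The get_system_dns_resolvers() hunk above drops the explicit 'r' argument because open() already defaults to text read mode. A sketch against a throwaway file rather than the real DNS_RESOLVER_FILE:

    import os
    import tempfile

    path = os.path.join(tempfile.mkdtemp(), 'resolv.conf')
    with open(path, 'w') as f:
        f.write('nameserver 10.0.0.2\n')

    with open(path) as rconf:             # mode defaults to 'r' (text, read)
        resolvers = [line.split()[1] for line in rconf
                     if line.startswith('nameserver')]
    assert resolvers == ['10.0.0.2']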
return diff --git a/neutron/common/utils.py b/neutron/common/utils.py index 6c6c6e2a2ed..cd6409c97ec 100644 --- a/neutron/common/utils.py +++ b/neutron/common/utils.py @@ -75,7 +75,7 @@ class WaitTimeout(Exception): """Default exception coming from wait_until_true() function.""" -class LockWithTimer(object): +class LockWithTimer: def __init__(self, threshold): self._threshold = threshold self.timestamp = 0 @@ -149,8 +149,8 @@ def get_dhcp_agent_device_id(network_id, host, segmentation_id=None): local_hostname = host.split('.')[0] host_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, str(local_hostname)) if not segmentation_id: - return 'dhcp%s-%s' % (host_uuid, network_id) - return 'dhcp%s-%s-%s' % (host_uuid, network_id, segmentation_id) + return 'dhcp{}-{}'.format(host_uuid, network_id) + return 'dhcp{}-{}-{}'.format(host_uuid, network_id, segmentation_id) def is_dns_servers_any_address(dns_servers, ip_version): @@ -160,7 +160,7 @@ def is_dns_servers_any_address(dns_servers, ip_version): netaddr.IPNetwork(dns_servers[0]).ip == ip_any) -class exception_logger(object): +class exception_logger: """Wrap a function and log raised exception :param logger: the logger to log the exception default is LOG.exception @@ -360,7 +360,7 @@ def is_version_greater_equal(version1, version2): versionutils.convert_version_to_tuple(version2)) -class DelayedStringRenderer(object): +class DelayedStringRenderer: """Takes a callable and its args and calls when __str__ is called Useful for when an argument to a logging statement is expensive to @@ -382,7 +382,7 @@ def _hex_format(port, mask=0): def hex_str(num): return format(num, '#06x') if mask > 0: - return "%s/%s" % (hex_str(port), hex_str(0xffff & ~mask)) + return "{}/{}".format(hex_str(port), hex_str(0xffff & ~mask)) return hex_str(port) @@ -745,7 +745,7 @@ def wait_until_true(predicate, timeout=60, sleep=1, exception=None): raise WaitTimeout(_("Timed out after %d seconds") % timeout) -class classproperty(object): +class classproperty: def __init__(self, f): self.func = f @@ -859,7 +859,7 @@ def disable_extension_by_service_plugin(core_plugin, service_plugin): def get_port_fixed_ips_set(port): - return set([ip["ip_address"] for ip in port.get("fixed_ips", [])]) + return {ip["ip_address"] for ip in port.get("fixed_ips", [])} def port_ip_changed(new_port, original_port): diff --git a/neutron/conf/services/extdns_designate_driver.py b/neutron/conf/services/extdns_designate_driver.py index f403cd50872..40d666b2a83 100644 --- a/neutron/conf/services/extdns_designate_driver.py +++ b/neutron/conf/services/extdns_designate_driver.py @@ -22,11 +22,11 @@ from neutron._i18n import _ class ZonePrefixIPv4(types.Integer): def __init__(self): - super(ZonePrefixIPv4, self).__init__( + super().__init__( min=8, max=24, type_name='IPv4 zone prefix') def __call__(self, value): - value = super(ZonePrefixIPv4, self).__call__(value) + value = super().__call__(value) if value % 8 != 0: raise ValueError(_('Should be multiple of 8')) return value @@ -34,11 +34,11 @@ class ZonePrefixIPv4(types.Integer): class ZonePrefixIPv6(types.Integer): def __init__(self): - super(ZonePrefixIPv6, self).__init__( + super().__init__( min=4, max=124, type_name='IPv6 zone prefix') def __call__(self, value): - value = super(ZonePrefixIPv6, self).__call__(value) + value = super().__call__(value) if value % 4 != 0: raise ValueError(_('Should be multiple of 4')) return value diff --git a/neutron/core_extensions/base.py b/neutron/core_extensions/base.py index 5f77940c357..3d1ec09fbc6 100644 --- 
a/neutron/core_extensions/base.py +++ b/neutron/core_extensions/base.py @@ -25,7 +25,7 @@ EVENT_UPDATE = 'update' CORE_RESOURCES = [NETWORK, PORT] -class CoreResourceExtension(object, metaclass=abc.ABCMeta): +class CoreResourceExtension(metaclass=abc.ABCMeta): @abc.abstractmethod def process_fields(self, context, resource_type, event_type, diff --git a/neutron/db/address_group_db.py b/neutron/db/address_group_db.py index c35776c9549..3f7db9e5ef9 100644 --- a/neutron/db/address_group_db.py +++ b/neutron/db/address_group_db.py @@ -69,7 +69,7 @@ class AddressGroupDbMixin(ag_ext.AddressGroupPluginBase): normalized_addrs = set() for addr in req_addrs: addr = netaddr.IPNetwork(addr) - normalized_addr = "%s/%s" % (addr.network, addr.prefixlen) + normalized_addr = "{}/{}".format(addr.network, addr.prefixlen) normalized_addrs.add(normalized_addr) addrs_in_ag = [] addrs_not_in_ag = [] diff --git a/neutron/db/agents_db.py b/neutron/db/agents_db.py index 9d2a0167b7d..02f8bb61350 100644 --- a/neutron/db/agents_db.py +++ b/neutron/db/agents_db.py @@ -80,7 +80,7 @@ def get_availability_zones_by_agent_type(context, agent_type, agents = agent_obj.Agent.get_agents_by_availability_zones_and_agent_type( context, agent_type=agent_type, availability_zones=availability_zones) - return set(agent.availability_zone for agent in agents) + return {agent.availability_zone for agent in agents} class AgentAvailabilityZoneMixin(az_ext.AvailabilityZonePluginBase): @@ -249,8 +249,8 @@ class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin): def _make_agent_dict(self, agent, fields=None): attr = agent_apidef.RESOURCE_ATTRIBUTE_MAP.get( agent_apidef.COLLECTION_NAME) - res = dict((k, agent[k]) for k in attr - if k not in ['alive', 'configurations']) + res = {k: agent[k] for k in attr + if k not in ['alive', 'configurations']} res['alive'] = not utils.is_agent_down( res['heartbeat_timestamp'] ) @@ -372,7 +372,7 @@ class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin): status = agent_consts.AGENT_ALIVE with db_api.CONTEXT_WRITER.using(context): res_keys = ['agent_type', 'binary', 'host', 'topic'] - res = dict((k, agent_state[k]) for k in res_keys) + res = {k: agent_state[k] for k in res_keys} if 'availability_zone' in agent_state: res['availability_zone'] = agent_state['availability_zone'] configurations_dict = agent_state.get('configurations', {}) @@ -457,7 +457,7 @@ class AgentDbMixin(ext_agent.AgentPluginBase, AgentAvailabilityZoneMixin): tracker.set_versions(consumer, resource_versions) -class AgentExtRpcCallback(object): +class AgentExtRpcCallback: """Processes the rpc report in plugin implementations. This class implements the server side of an rpc interface. 
The client side @@ -477,7 +477,7 @@ class AgentExtRpcCallback(object): START_TIME = timeutils.utcnow() def __init__(self, plugin=None): - super(AgentExtRpcCallback, self).__init__() + super().__init__() self.plugin = plugin # TODO(ajo): fix the resources circular dependency issue by dynamically # registering object types in the RPC callbacks api diff --git a/neutron/db/agentschedulers_db.py b/neutron/db/agentschedulers_db.py index 4efba7b4219..46ed0566299 100644 --- a/neutron/db/agentschedulers_db.py +++ b/neutron/db/agentschedulers_db.py @@ -81,7 +81,7 @@ class AgentSchedulerDbMixin(agents_db.AgentDbMixin): def update_agent(self, context, id, agent): original_agent = self.get_agent(context, id) - result = super(AgentSchedulerDbMixin, self).update_agent( + result = super().update_agent( context, id, agent) agent_data = agent['agent'] agent_notifier = self.agent_notifiers.get(original_agent['agent_type']) diff --git a/neutron/db/allowedaddresspairs_db.py b/neutron/db/allowedaddresspairs_db.py index 34e17764ed3..a4fff46d3bd 100644 --- a/neutron/db/allowedaddresspairs_db.py +++ b/neutron/db/allowedaddresspairs_db.py @@ -29,7 +29,7 @@ from neutron.objects.port.extensions import (allowedaddresspairs @resource_extend.has_resource_extenders -class AllowedAddressPairsMixin(object): +class AllowedAddressPairsMixin: """Mixin class for allowed address pairs.""" def _process_create_allowed_address_pairs(self, context, port, diff --git a/neutron/db/data_plane_status_db.py b/neutron/db/data_plane_status_db.py index b37bab5f7be..d489e1713e9 100644 --- a/neutron/db/data_plane_status_db.py +++ b/neutron/db/data_plane_status_db.py @@ -17,7 +17,7 @@ from neutron_lib.api.definitions import data_plane_status as dps_lib from neutron.objects.port.extensions import data_plane_status as dps_obj -class DataPlaneStatusMixin(object): +class DataPlaneStatusMixin: """Mixin class to add data plane status to a port""" def _process_create_port_data_plane_status(self, context, data, res): diff --git a/neutron/db/db_base_plugin_common.py b/neutron/db/db_base_plugin_common.py index e806265c4c3..887271b28f8 100644 --- a/neutron/db/db_base_plugin_common.py +++ b/neutron/db/db_base_plugin_common.py @@ -94,7 +94,7 @@ def make_result_with_fields(f): return inner -class DbBasePluginCommon(object): +class DbBasePluginCommon: """Stores getters and helper methods for db_base_plugin_v2 All private getters and simple helpers like _make_*_dict were moved from diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py index 26179e06f07..28826ffc773 100644 --- a/neutron/db/db_base_plugin_v2.py +++ b/neutron/db/db_base_plugin_v2.py @@ -90,8 +90,8 @@ def _ensure_subnet_not_used(context, subnet_id): def _update_subnetpool_dict(orig_pool, new_pool): - updated = dict((k, v) for k, v in orig_pool.to_dict().items() - if k not in orig_pool.synthetic_fields or k == 'shared') + updated = {k: v for k, v in orig_pool.to_dict().items() + if k not in orig_pool.synthetic_fields or k == 'shared'} new_pool = new_pool.copy() new_prefixes = new_pool.pop('prefixes', constants.ATTR_NOT_SPECIFIED) @@ -164,7 +164,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, query_hook=_port_query_hook, filter_hook=_port_filter_hook, result_filters=None) - return super(NeutronDbPluginV2, cls).__new__(cls, *args, **kwargs) + return super().__new__(cls, *args, **kwargs) @staticmethod def _validate_ipv6_pd(): @@ -905,7 +905,8 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, if has_cidr: # turn the CIDR into a proper 
subnet net = netaddr.IPNetwork(s['cidr']) - subnet['subnet']['cidr'] = '%s/%s' % (net.network, net.prefixlen) + subnet['subnet']['cidr'] = '{}/{}'.format( + net.network, net.prefixlen) subnetpool_id = self._get_subnetpool_id(context, s) if not subnetpool_id and not has_cidr: @@ -1264,8 +1265,8 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, context, network_id=affected_source_network_ids, ip_version=ip_version) - all_affected_subnet_ids = set( - [subnet.id for subnet in all_network_subnets]) + all_affected_subnet_ids = { + subnet.id for subnet in all_network_subnets} # Use set difference to identify the subnets that would be # violating address scope affinity constraints if the subnet diff --git a/neutron/db/dns_db.py b/neutron/db/dns_db.py index 7ae78b6bc2b..7118289dd61 100644 --- a/neutron/db/dns_db.py +++ b/neutron/db/dns_db.py @@ -32,7 +32,7 @@ from neutron.services.externaldns import driver LOG = logging.getLogger(__name__) -class DNSActionsData(object): +class DNSActionsData: def __init__(self, current_dns_name=None, current_dns_domain=None, previous_dns_name=None, previous_dns_domain=None): @@ -43,7 +43,7 @@ class DNSActionsData(object): @resource_extend.has_resource_extenders -class DNSDbMixin(object): +class DNSDbMixin: """Mixin class to add DNS methods to db_base_plugin_v2.""" _dns_driver = None diff --git a/neutron/db/external_net_db.py b/neutron/db/external_net_db.py index 591f17d76b5..b873b088b4d 100644 --- a/neutron/db/external_net_db.py +++ b/neutron/db/external_net_db.py @@ -53,7 +53,7 @@ def _network_result_filter_hook(query, filters): @resource_extend.has_resource_extenders @registry.has_registry_receivers -class External_net_db_mixin(object): +class External_net_db_mixin: """Mixin class to add external network methods to db_base_plugin_v2.""" def __new__(cls, *args, **kwargs): @@ -73,7 +73,7 @@ class External_net_db_mixin(object): result_filters=None, rbac_actions=EXTERNAL_NETWORK_RBAC_ACTIONS, ) - return super(External_net_db_mixin, cls).__new__(cls, *args, **kwargs) + return super().__new__(cls, *args, **kwargs) def _network_is_external(self, context, net_id): return net_obj.ExternalNetwork.objects_exist( diff --git a/neutron/db/extradhcpopt_db.py b/neutron/db/extradhcpopt_db.py index 1949b32e178..18e7dd5554e 100644 --- a/neutron/db/extradhcpopt_db.py +++ b/neutron/db/extradhcpopt_db.py @@ -23,7 +23,7 @@ from neutron.objects import ports as port_obj @resource_extend.has_resource_extenders -class ExtraDhcpOptMixin(object): +class ExtraDhcpOptMixin: """Mixin class to add extra options to the DHCP opts file and associate them to a port. 
""" diff --git a/neutron/db/extraroute_db.py b/neutron/db/extraroute_db.py index 7c346c7b43f..4fab177483b 100644 --- a/neutron/db/extraroute_db.py +++ b/neutron/db/extraroute_db.py @@ -68,7 +68,7 @@ class ExtraRoute_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin): context, request_body=router_data, states=(old_router,), resource_id=id, desired_state=router_db)) - return super(ExtraRoute_dbonly_mixin, self).update_router( + return super().update_router( context, id, router) def _validate_routes_nexthop(self, cidrs, ips, routes, nexthop): @@ -153,8 +153,7 @@ class ExtraRoute_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin): def _confirm_router_interface_not_in_use(self, context, router_id, subnet): - super(ExtraRoute_dbonly_mixin, - self)._confirm_router_interface_not_in_use( + super()._confirm_router_interface_not_in_use( context, router_id, subnet) subnet_cidr = netaddr.IPNetwork(subnet['cidr']) extra_routes = self._get_extra_routes_by_router_id(context, router_id) diff --git a/neutron/db/flavors_db.py b/neutron/db/flavors_db.py index 7e040e3cfb8..462b14de178 100644 --- a/neutron/db/flavors_db.py +++ b/neutron/db/flavors_db.py @@ -26,7 +26,7 @@ from neutron.objects import flavor as obj_flavor LOG = logging.getLogger(__name__) -class FlavorsDbMixin(object): +class FlavorsDbMixin: """Class to support flavors and service profiles.""" diff --git a/neutron/db/ipam_backend_mixin.py b/neutron/db/ipam_backend_mixin.py index 9c245ae6898..fc8224df586 100644 --- a/neutron/db/ipam_backend_mixin.py +++ b/neutron/db/ipam_backend_mixin.py @@ -123,11 +123,11 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): old_route_list = self._get_route_by_subnet(context, id) - new_route_set = set([_combine(route) - for route in s['host_routes']]) + new_route_set = {_combine(route) + for route in s['host_routes']} - old_route_set = set([_combine(route) - for route in old_route_list]) + old_route_set = {_combine(route) + for route in old_route_list} for route_str in old_route_set - new_route_set: for route in old_route_list: @@ -481,8 +481,8 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): new_ips, device_owner): """Calculate changes in IPs for the port.""" # Collect auto addressed subnet ids that has to be removed on update - delete_subnet_ids = set(ip['subnet_id'] for ip in new_ips - if ip.get('delete_subnet')) + delete_subnet_ids = {ip['subnet_id'] for ip in new_ips + if ip.get('delete_subnet')} ips = [ip for ip in new_ips if ip.get('subnet_id') not in delete_subnet_ids] @@ -701,7 +701,7 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): network_id=network_id, service_type=service_type) def _make_subnet_args(self, detail, subnet, subnetpool_id): - args = super(IpamBackendMixin, self)._make_subnet_args( + args = super()._make_subnet_args( detail, subnet, subnetpool_id) if validators.is_attr_set(subnet.get(segment.SEGMENT_ID)): args['segment_id'] = subnet[segment.SEGMENT_ID] diff --git a/neutron/db/ipam_pluggable_backend.py b/neutron/db/ipam_pluggable_backend.py index fc22527d5c0..67443c9537f 100644 --- a/neutron/db/ipam_pluggable_backend.py +++ b/neutron/db/ipam_pluggable_backend.py @@ -516,7 +516,7 @@ class IpamPluggableBackend(ipam_backend_mixin.IpamBackendMixin): port = port or self._get_port(context, id) ipam_driver = driver.Pool.get_instance(None, context) - super(IpamPluggableBackend, self).delete_port(context, id) + super().delete_port(context, id) # Deallocating ips via IPAM after port is deleted locally. 
# So no need to do rollback actions on remote server # in case of fail to delete port locally @@ -534,7 +534,7 @@ class IpamPluggableBackend(ipam_backend_mixin.IpamBackendMixin): # _update_subnet_allocation_pools (ipam_backend_mixin), # so create unchanged copy for ipam driver subnet_copy = copy.deepcopy(s) - subnet, changes = super(IpamPluggableBackend, self).update_db_subnet( + subnet, changes = super().update_db_subnet( context, id, s, old_pools, subnet_obj=subnet_obj) ipam_driver = driver.Pool.get_instance(None, context) diff --git a/neutron/db/l3_agentschedulers_db.py b/neutron/db/l3_agentschedulers_db.py index ac073711859..c09b90d1df1 100644 --- a/neutron/db/l3_agentschedulers_db.py +++ b/neutron/db/l3_agentschedulers_db.py @@ -421,8 +421,8 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase, agent_modes = filters.pop('agent_modes', []) if agent_modes: - config_filters = set('\"agent_mode\": \"%s\"' % agent_mode - for agent_mode in agent_modes) + config_filters = {'\"agent_mode\": \"%s\"' % agent_mode + for agent_mode in agent_modes} agent_filters.update(filters) agent_objs = [] if config_filters: diff --git a/neutron/db/l3_attrs_db.py b/neutron/db/l3_attrs_db.py index dd165c3a70c..891eca856bf 100644 --- a/neutron/db/l3_attrs_db.py +++ b/neutron/db/l3_attrs_db.py @@ -42,7 +42,7 @@ def get_attr_info(): @resource_extend.has_resource_extenders -class ExtraAttributesMixin(object): +class ExtraAttributesMixin: """Mixin class to enable router's extra attributes.""" @staticmethod diff --git a/neutron/db/l3_db.py b/neutron/db/l3_db.py index 8261f845efc..d78832107d0 100644 --- a/neutron/db/l3_db.py +++ b/neutron/db/l3_db.py @@ -101,7 +101,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase, _fip_qos = None def __new__(cls, *args, **kwargs): - inst = super(L3_NAT_dbonly_mixin, cls).__new__(cls, *args, **kwargs) + inst = super().__new__(cls, *args, **kwargs) inst._start_janitor() return inst @@ -226,7 +226,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase, return router def _make_router_dict(self, router, fields=None, process_extensions=True): - res = dict((key, router[key]) for key in CORE_ROUTER_ATTRS) + res = {key: router[key] for key in CORE_ROUTER_ATTRS} if router['gw_port_id']: ext_gw_info = { 'network_id': router.gw_port['network_id'], @@ -579,15 +579,15 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase, if not gw_port: return True - subnet_ids = set(ip['subnet_id'] for ip in gw_port['fixed_ips']) - new_subnet_ids = set(f['subnet_id'] for f in ext_ips - if f.get('subnet_id')) + subnet_ids = {ip['subnet_id'] for ip in gw_port['fixed_ips']} + new_subnet_ids = {f['subnet_id'] for f in ext_ips + if f.get('subnet_id')} subnet_change = not new_subnet_ids == subnet_ids if subnet_change: return True - ip_addresses = set(ip['ip_address'] for ip in gw_port['fixed_ips']) - new_ip_addresses = set(f['ip_address'] for f in ext_ips - if f.get('ip_address')) + ip_addresses = {ip['ip_address'] for ip in gw_port['fixed_ips']} + new_ip_addresses = {f['ip_address'] for f in ext_ips + if f.get('ip_address')} ip_address_change = not ip_addresses == new_ip_addresses return ip_address_change @@ -1654,9 +1654,9 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase, return old_floatingip, floatingip_dict def _floatingips_to_router_ids(self, floatingips): - return list(set([floatingip['router_id'] - for floatingip in floatingips - if floatingip['router_id']])) + return list({floatingip['router_id'] + for floatingip in floatingips + if floatingip['router_id']}) @db_api.retry_if_session_inactive() 
def update_floatingip(self, context, id, floatingip): @@ -1912,9 +1912,9 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase, filters=filters) if not router_dicts: return [] - gw_ports = dict((r['gw_port']['id'], r['gw_port']) - for r in router_dicts - if r.get('gw_port')) + gw_ports = {r['gw_port']['id']: r['gw_port'] + for r in router_dicts + if r.get('gw_port')} return self._build_routers_list(context, router_dicts, gw_ports) def _make_floatingip_dict_with_scope(self, floatingip_obj, scope_id): @@ -1990,7 +1990,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase, subnet['address_scope_id'] = address_scope_id return subnet - subnets_by_network = dict((id, []) for id in network_ids) + subnets_by_network = {id: [] for id in network_ids} for subnet in (make_subnet_dict_with_scope(row) for row in query): subnets_by_network[subnet['network_id']].append(subnet) return subnets_by_network @@ -2002,8 +2002,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase, fields = ['id', 'mtu'] networks = self._core_plugin.get_networks(context, filters=filters, fields=fields) - mtus_by_network = dict((network['id'], network.get('mtu', 0)) - for network in networks) + mtus_by_network = {network['id']: network.get('mtu', 0) + for network in networks} return mtus_by_network def _populate_mtu_and_subnets_for_ports(self, context, ports): @@ -2107,7 +2107,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase, ports_to_populate = [router['gw_port'] for router in routers if router.get('gw_port')] + interfaces self._populate_mtu_and_subnets_for_ports(context, ports_to_populate) - routers_dict = dict((router['id'], router) for router in routers) + routers_dict = {router['id']: router for router in routers} self._process_floating_ips(context, routers_dict, floating_ips) self._process_interfaces(routers_dict, interfaces) return list(routers_dict.values()) @@ -2123,7 +2123,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase, @registry.has_registry_receivers -class L3RpcNotifierMixin(object): +class L3RpcNotifierMixin: """Mixin class to add rpc notifier attribute to db_base_plugin_v2.""" @staticmethod @@ -2156,7 +2156,7 @@ class L3RpcNotifierMixin(object): device_owner=DEVICE_OWNER_ROUTER_GW) query = query.join(models_v2.Port.fixed_ips).filter( models_v2.IPAllocation.subnet_id == subnet_id) - router_ids = set(port.device_id for port in query) + router_ids = {port.device_id for port in query} for router_id in router_ids: l3plugin.notify_router_updated(context, router_id) @@ -2225,26 +2225,26 @@ class L3_NAT_db_mixin(L3_NAT_dbonly_mixin, L3RpcNotifierMixin): """Mixin class to add rpc notifier methods to db_base_plugin_v2.""" def create_router(self, context, router): - router_dict = super(L3_NAT_db_mixin, self).create_router(context, - router) + router_dict = super().create_router(context, + router) if router_dict.get('external_gateway_info'): self.notify_router_updated(context, router_dict['id'], None) return router_dict def update_router(self, context, id, router): - router_dict = super(L3_NAT_db_mixin, self).update_router(context, - id, router) + router_dict = super().update_router(context, + id, router) self.notify_router_updated(context, router_dict['id'], None) return router_dict def delete_router(self, context, id): - super(L3_NAT_db_mixin, self).delete_router(context, id) + super().delete_router(context, id) self.notify_router_deleted(context, id) def notify_router_interface_action( self, context, router_interface_info, action): l3_method = '%s_router_interface' % action - super(L3_NAT_db_mixin, self).notify_routers_updated( + 
super().notify_routers_updated( context, [router_interface_info['id']], l3_method, {'subnet_id': router_interface_info['subnet_id']}) @@ -2255,16 +2255,14 @@ class L3_NAT_db_mixin(L3_NAT_dbonly_mixin, L3RpcNotifierMixin): {'router_interface': router_interface_info}) def add_router_interface(self, context, router_id, interface_info=None): - router_interface_info = super( - L3_NAT_db_mixin, self).add_router_interface( + router_interface_info = super().add_router_interface( context, router_id, interface_info) self.notify_router_interface_action( context, router_interface_info, 'add') return router_interface_info def remove_router_interface(self, context, router_id, interface_info): - router_interface_info = super( - L3_NAT_db_mixin, self).remove_router_interface( + router_interface_info = super().remove_router_interface( context, router_id, interface_info) self.notify_router_interface_action( context, router_interface_info, 'remove') @@ -2272,7 +2270,7 @@ class L3_NAT_db_mixin(L3_NAT_dbonly_mixin, L3RpcNotifierMixin): def create_floatingip(self, context, floatingip, initial_status=constants.FLOATINGIP_STATUS_ACTIVE): - floatingip_dict = super(L3_NAT_db_mixin, self).create_floatingip( + floatingip_dict = super().create_floatingip( context, floatingip, initial_status) router_id = floatingip_dict['router_id'] self.notify_router_updated(context, router_id, 'create_floatingip') @@ -2283,7 +2281,7 @@ class L3_NAT_db_mixin(L3_NAT_dbonly_mixin, L3RpcNotifierMixin): context, id, floatingip) router_ids = self._floatingips_to_router_ids( [old_floatingip, floatingip]) - super(L3_NAT_db_mixin, self).notify_routers_updated( + super().notify_routers_updated( context, router_ids, 'update_floatingip', {}) return floatingip @@ -2293,7 +2291,7 @@ class L3_NAT_db_mixin(L3_NAT_dbonly_mixin, L3RpcNotifierMixin): 'delete_floatingip') def notify_routers_updated(self, context, router_ids): - super(L3_NAT_db_mixin, self).notify_routers_updated( + super().notify_routers_updated( context, list(router_ids), 'disassociate_floatingips', {}) def _migrate_router_ports(self, context, router_db, old_owner, new_owner): diff --git a/neutron/db/l3_dvr_db.py b/neutron/db/l3_dvr_db.py index 1f3b3eb8962..f5ad670564d 100644 --- a/neutron/db/l3_dvr_db.py +++ b/neutron/db/l3_dvr_db.py @@ -84,7 +84,7 @@ def is_port_bound(port): @registry.has_registry_receivers -class DVRResourceOperationHandler(object): +class DVRResourceOperationHandler: """Contains callbacks for DVR operations. This can be implemented as a mixin or can be instantiated as a stand-alone @@ -859,7 +859,7 @@ class DVRResourceOperationHandler(object): self._core_plugin.delete_port(payload.context, gw_port['id']) -class _DVRAgentInterfaceMixin(object): +class _DVRAgentInterfaceMixin: """Contains calls made by the DVR scheduler and RPC interface. Must be instantiated as a mixin with the L3 plugin. 
@@ -897,7 +897,7 @@ class _DVRAgentInterfaceMixin(object): def _build_routers_list(self, context, routers, gw_ports): # Perform a single query up front for all routers - routers = super(_DVRAgentInterfaceMixin, self)._build_routers_list( + routers = super()._build_routers_list( context, routers, gw_ports) for router in routers: gw_port_host = self._get_gateway_port_host( @@ -1003,8 +1003,8 @@ class _DVRAgentInterfaceMixin(object): context, router_ids=router_ids, active=active, device_owners=const.ROUTER_INTERFACE_OWNERS, fip_host_filter=fip_host_filter) - dvr_router_ids = set(router['id'] for router in routers - if is_distributed_router(router)) + dvr_router_ids = {router['id'] for router in routers + if is_distributed_router(router)} floating_ip_port_ids = [fip['port_id'] for fip in floating_ips if fip['router_id'] in dvr_router_ids] if floating_ip_port_ids: @@ -1346,8 +1346,7 @@ class L3_NAT_with_dvr_db_mixin(_DVRAgentInterfaceMixin, router = self._get_router(context, router) if is_distributed_router(router): return const.DEVICE_OWNER_DVR_INTERFACE - return super(L3_NAT_with_dvr_db_mixin, - self)._get_device_owner(context, router) + return super()._get_device_owner(context, router) @db_api.retry_if_session_inactive() def create_floatingip(self, context, floatingip, diff --git a/neutron/db/l3_dvr_ha_scheduler_db.py b/neutron/db/l3_dvr_ha_scheduler_db.py index 2eafdf5ea6f..255f62e2dcc 100644 --- a/neutron/db/l3_dvr_ha_scheduler_db.py +++ b/neutron/db/l3_dvr_ha_scheduler_db.py @@ -27,8 +27,7 @@ class L3_DVR_HA_scheduler_db_mixin(l3agent_dvr_sch_db.L3_DVRsch_db_mixin, In case dvr serviceable port was deleted we need to check if any dvr routers should be removed from l3 agent on port's host """ - remove_router_info = super( - L3_DVR_HA_scheduler_db_mixin, self).get_dvr_routers_to_remove( + remove_router_info = super().get_dvr_routers_to_remove( context, port_id, get_related_hosts_info) # Process the router information which was returned to make # sure we don't delete routers which have dvrhs snat bindings. 
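# --- Illustrative sketch (not part of the Neutron diff above): the zero-
# argument super() adopted throughout these L3/DVR scheduler mixins resolves
# the class and instance from the enclosing scope, so it walks the same MRO
# as the old two-argument form. The classes and host names below are
# hypothetical, chosen only to mimic the cooperative-mixin call pattern.
class BaseScheduler:
    def get_hosts_to_notify(self, router_id):
        return {'host-a'}


class DvrMixin:
    def get_hosts_to_notify(self, router_id):
        # Equivalent to super(DvrMixin, self).get_hosts_to_notify(router_id)
        return super().get_hosts_to_notify(router_id) | {'host-b'}


class Scheduler(DvrMixin, BaseScheduler):
    pass


assert Scheduler().get_hosts_to_notify('r1') == {'host-a', 'host-b'}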
diff --git a/neutron/db/l3_dvrscheduler_db.py b/neutron/db/l3_dvrscheduler_db.py index db2a5caa714..c85a2f1d0c0 100644 --- a/neutron/db/l3_dvrscheduler_db.py +++ b/neutron/db/l3_dvrscheduler_db.py @@ -303,7 +303,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin): def get_hosts_to_notify(self, context, router_id): """Returns all hosts to send notification about router update""" - hosts = super(L3_DVRsch_db_mixin, self).get_hosts_to_notify( + hosts = super().get_hosts_to_notify( context, router_id) router = self.get_router(context.elevated(), router_id) if router.get('distributed', False): @@ -418,8 +418,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin): def _get_router_ids_for_agent(self, context, agent_db, router_ids, with_dvr=True): - result_set = set(super(L3_DVRsch_db_mixin, - self)._get_router_ids_for_agent( + result_set = set(super()._get_router_ids_for_agent( context, agent_db, router_ids, with_dvr)) if not with_dvr: return result_set diff --git a/neutron/db/l3_extra_gws_db.py b/neutron/db/l3_extra_gws_db.py index 69648f6483d..bae643adc23 100644 --- a/neutron/db/l3_extra_gws_db.py +++ b/neutron/db/l3_extra_gws_db.py @@ -300,9 +300,9 @@ class ExtraGatewaysDbOnlyMixin(l3_gwmode_db.L3_NAT_dbonly_mixin): 'is not specified in the request') % net_id) for gw_port in gw_ports: - current_set = set([a.ip_address for a in gw_port['fixed_ips']]) - target_set = set([netaddr.IPAddress(d['ip_address']) - for d in gw_info['external_fixed_ips']]) + current_set = {a.ip_address for a in gw_port['fixed_ips']} + target_set = {netaddr.IPAddress(d['ip_address']) + for d in gw_info['external_fixed_ips']} # If there is an intersection - it's a partial match. if current_set & target_set: part_matched_port_ids[gw_port['id']] = gw_info diff --git a/neutron/db/l3_fip_pools_db.py b/neutron/db/l3_fip_pools_db.py index 035f867fa56..be20ecdbe6a 100644 --- a/neutron/db/l3_fip_pools_db.py +++ b/neutron/db/l3_fip_pools_db.py @@ -24,7 +24,7 @@ from neutron.objects import network as net_obj from neutron.objects import subnet as subnet_obj -class FloatingIPPoolsDbMixin(object): +class FloatingIPPoolsDbMixin: """Class to support floating IP pool.""" _is_v6_supported = None diff --git a/neutron/db/l3_fip_port_details.py b/neutron/db/l3_fip_port_details.py index d414f5ed972..09160303337 100644 --- a/neutron/db/l3_fip_port_details.py +++ b/neutron/db/l3_fip_port_details.py @@ -27,7 +27,7 @@ def _make_port_details_dict(port): @resource_extend.has_resource_extenders -class Fip_port_details_db_mixin(object): +class Fip_port_details_db_mixin: """Mixin class to enable floating IP's port_details attributes.""" @staticmethod diff --git a/neutron/db/l3_fip_qos.py b/neutron/db/l3_fip_qos.py index 99e19c857ce..1be6fe5dc60 100644 --- a/neutron/db/l3_fip_qos.py +++ b/neutron/db/l3_fip_qos.py @@ -18,7 +18,7 @@ from neutron_lib.services.qos import constants as qos_consts @resource_extend.has_resource_extenders -class FloatingQoSDbMixin(object): +class FloatingQoSDbMixin: """Mixin class to enable floating IP's QoS extra attributes.""" @staticmethod diff --git a/neutron/db/l3_gateway_ip_qos.py b/neutron/db/l3_gateway_ip_qos.py index d69655f0e12..98a5b1d9159 100644 --- a/neutron/db/l3_gateway_ip_qos.py +++ b/neutron/db/l3_gateway_ip_qos.py @@ -70,8 +70,7 @@ class L3_gw_ip_qos_dbonly_mixin(l3_gwmode_db.L3_NAT_dbonly_mixin): def _update_router_gw_info(self, context, router_id, info, request_body, router=None): # Calls superclass, pass router db object for avoiding re-loading - router = 
super(L3_gw_ip_qos_dbonly_mixin, - self)._update_router_gw_info( + router = super()._update_router_gw_info( context, router_id, info, request_body, router) if not self._is_gw_ip_qos_supported: @@ -112,8 +111,7 @@ class L3_gw_ip_qos_dbonly_mixin(l3_gwmode_db.L3_NAT_dbonly_mixin): context, router_id, new_qos_policy_id) def _build_routers_list(self, context, routers, gw_ports): - routers = super(L3_gw_ip_qos_dbonly_mixin, - self)._build_routers_list( + routers = super()._build_routers_list( context, routers, gw_ports) if not self._is_gw_ip_qos_supported: diff --git a/neutron/db/l3_gwmode_db.py b/neutron/db/l3_gwmode_db.py index b5206218051..875a5e0f9e1 100644 --- a/neutron/db/l3_gwmode_db.py +++ b/neutron/db/l3_gwmode_db.py @@ -71,7 +71,7 @@ class L3_NAT_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin): desired_state=router)) # Calls superclass, pass router db object for avoiding re-loading - super(L3_NAT_dbonly_mixin, self)._update_router_gw_info( + super()._update_router_gw_info( context, router_id, info, request_body, router=router) # Returning the router might come back useful if this # method is overridden in child classes @@ -85,7 +85,7 @@ class L3_NAT_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin): return cfg.CONF.enable_snat_by_default def _build_routers_list(self, context, routers, gw_ports): - routers = super(L3_NAT_dbonly_mixin, self)._build_routers_list( + routers = super()._build_routers_list( context, routers, gw_ports) for rtr in routers: gw_port_id = rtr['gw_port_id'] diff --git a/neutron/db/l3_hamode_db.py b/neutron/db/l3_hamode_db.py index ddb9cf0bb96..ae9ef5eae0d 100644 --- a/neutron/db/l3_hamode_db.py +++ b/neutron/db/l3_hamode_db.py @@ -97,7 +97,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin, raise l3ha_exc.HAMaximumAgentsNumberNotValid(max_agents=max_agents) def __new__(cls, *args, **kwargs): - inst = super(L3_HA_NAT_db_mixin, cls).__new__(cls, *args, **kwargs) + inst = super().__new__(cls, *args, **kwargs) inst._verify_configuration() return inst @@ -111,7 +111,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin, vr_id_objs = l3_hamode.L3HARouterVRIdAllocation.get_objects( context, network_id=network_id) - allocated_vr_ids = set(a.vr_id for a in vr_id_objs) - set([0]) + allocated_vr_ids = {a.vr_id for a in vr_id_objs} - {0} return allocated_vr_ids def _get_vr_id(self, context, network_id): @@ -356,8 +356,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin, if (is_ha_router(router) and not l3_dvr_db.is_distributed_router(router)): return constants.DEVICE_OWNER_HA_REPLICATED_INT - return super(L3_HA_NAT_db_mixin, - self)._get_device_owner(context, router) + return super()._get_device_owner(context, router) @registry.receives(resources.ROUTER, [events.BEFORE_CREATE], priority_group.PRIORITY_ROUTER_EXTENDED_ATTRIBUTE) @@ -668,7 +667,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin, @log_helpers.log_method_call def _process_sync_ha_data(self, context, routers, host, is_any_dvr_agent): - routers_dict = dict((router['id'], router) for router in routers) + routers_dict = {router['id']: router for router in routers} bindings = self.get_ha_router_port_bindings(context, routers_dict.keys(), @@ -717,7 +716,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin, sync_data = self._get_dvr_sync_data(context, host, agent, router_ids, active) else: - sync_data = super(L3_HA_NAT_db_mixin, self).get_sync_data( + sync_data = super().get_sync_data( context, router_ids, active) return self._process_sync_ha_data( context, sync_data, host, 
dvr_agent_mode) @@ -767,7 +766,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin, def _get_gateway_port_host(self, context, router, gw_ports): if not router.get('ha'): - return super(L3_HA_NAT_db_mixin, self)._get_gateway_port_host( + return super()._get_gateway_port_host( context, router, gw_ports) gw_port_id = router['gw_port_id'] diff --git a/neutron/db/metering/metering_rpc.py b/neutron/db/metering/metering_rpc.py index b197f744f8d..8523a8fc8a9 100644 --- a/neutron/db/metering/metering_rpc.py +++ b/neutron/db/metering/metering_rpc.py @@ -23,7 +23,7 @@ import oslo_messaging LOG = logging.getLogger(__name__) -class MeteringRpcCallbacks(object): +class MeteringRpcCallbacks: target = oslo_messaging.Target(version='1.0') diff --git a/neutron/db/migration/alembic_migrations/dvr_init_opts.py b/neutron/db/migration/alembic_migrations/dvr_init_opts.py index 729b0988bed..83a0e009712 100644 --- a/neutron/db/migration/alembic_migrations/dvr_init_opts.py +++ b/neutron/db/migration/alembic_migrations/dvr_init_opts.py @@ -39,7 +39,7 @@ def upgrade(): nullable=False, server_default='normal'), sa.Column('profile', sa.String(length=4095), nullable=False, server_default=''), - sa.Column(u'status', sa.String(16), nullable=False), + sa.Column('status', sa.String(16), nullable=False), sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ondelete='CASCADE'), sa.PrimaryKeyConstraint('port_id', 'host') diff --git a/neutron/db/migration/alembic_migrations/loadbalancer_init_ops.py b/neutron/db/migration/alembic_migrations/loadbalancer_init_ops.py index 111bcf64b33..329b32a893b 100644 --- a/neutron/db/migration/alembic_migrations/loadbalancer_init_ops.py +++ b/neutron/db/migration/alembic_migrations/loadbalancer_init_ops.py @@ -133,11 +133,11 @@ def upgrade(): sa.PrimaryKeyConstraint('pool_id')) op.create_table( - u'embrane_pool_port', - sa.Column(u'pool_id', sa.String(length=36), nullable=False), - sa.Column(u'port_id', sa.String(length=36), nullable=False), - sa.ForeignKeyConstraint(['pool_id'], [u'pools.id'], - name=u'embrane_pool_port_ibfk_1'), - sa.ForeignKeyConstraint(['port_id'], [u'ports.id'], - name=u'embrane_pool_port_ibfk_2'), - sa.PrimaryKeyConstraint(u'pool_id')) + 'embrane_pool_port', + sa.Column('pool_id', sa.String(length=36), nullable=False), + sa.Column('port_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['pool_id'], ['pools.id'], + name='embrane_pool_port_ibfk_1'), + sa.ForeignKeyConstraint(['port_id'], ['ports.id'], + name='embrane_pool_port_ibfk_2'), + sa.PrimaryKeyConstraint('pool_id')) diff --git a/neutron/db/migration/alembic_migrations/versions/2023.2/expand/682c319773d7_create_l3harouternetwork_project_id_.py b/neutron/db/migration/alembic_migrations/versions/2023.2/expand/682c319773d7_create_l3harouternetwork_project_id_.py index c506b2bec67..e920f9be7f6 100644 --- a/neutron/db/migration/alembic_migrations/versions/2023.2/expand/682c319773d7_create_l3harouternetwork_project_id_.py +++ b/neutron/db/migration/alembic_migrations/versions/2023.2/expand/682c319773d7_create_l3harouternetwork_project_id_.py @@ -35,6 +35,6 @@ COLUMN = 'project_id' def upgrade(): op.create_unique_constraint( - constraint_name='uniq_%s0%s' % (TABLE, COLUMN), + constraint_name='uniq_{}0{}'.format(TABLE, COLUMN), table_name=TABLE, columns=[COLUMN]) diff --git a/neutron/db/migration/alembic_migrations/versions/2023.2/expand/6f1145bff34c_port_hints.py b/neutron/db/migration/alembic_migrations/versions/2023.2/expand/6f1145bff34c_port_hints.py index b82d3de9566..9a465165695 100644 
--- a/neutron/db/migration/alembic_migrations/versions/2023.2/expand/6f1145bff34c_port_hints.py +++ b/neutron/db/migration/alembic_migrations/versions/2023.2/expand/6f1145bff34c_port_hints.py @@ -40,6 +40,6 @@ def upgrade(): sa.ForeignKey('ports.id', ondelete='CASCADE'), primary_key=True), sa.Column('hints', - sa.String(4095), - nullable=False), + sa.String(4095), + nullable=False), ) diff --git a/neutron/db/migration/alembic_migrations/versions/victoria/expand/fd6107509ccd_ovn_distributed_device_owner.py b/neutron/db/migration/alembic_migrations/versions/victoria/expand/fd6107509ccd_ovn_distributed_device_owner.py index 86806c4c470..50584f32466 100644 --- a/neutron/db/migration/alembic_migrations/versions/victoria/expand/fd6107509ccd_ovn_distributed_device_owner.py +++ b/neutron/db/migration/alembic_migrations/versions/victoria/expand/fd6107509ccd_ovn_distributed_device_owner.py @@ -47,5 +47,5 @@ def update_device_owner_ovn_distributed_ports(): session.execute(ports.update().values( device_owner=constants.DEVICE_OWNER_DISTRIBUTED).where( ports.c.device_owner == constants.DEVICE_OWNER_DHCP).where( - ports.c.device_id.like('{}%'.format(OVN_METADATA_PREFIX)))) + ports.c.device_id.like(f'{OVN_METADATA_PREFIX}%'))) session.commit() diff --git a/neutron/db/migration/cli.py b/neutron/db/migration/cli.py index fbf105b59f0..31e6fe576be 100644 --- a/neutron/db/migration/cli.py +++ b/neutron/db/migration/cli.py @@ -250,7 +250,7 @@ def _get_release_labels(labels): for label in labels: # release labels were introduced Liberty for a short time and dropped # in that same release cycle - result.add('%s_%s' % (migration.LIBERTY, label)) + result.add('{}_{}'.format(migration.LIBERTY, label)) return result @@ -348,7 +348,7 @@ def _check_head(branch_name, head_file, head): try: with open(head_file) as file_: observed_head = file_.read().strip() - except IOError: + except OSError: pass else: if observed_head != head: diff --git a/neutron/db/migration/connection.py b/neutron/db/migration/connection.py index dcabffd7560..24cdc0b315d 100644 --- a/neutron/db/migration/connection.py +++ b/neutron/db/migration/connection.py @@ -13,7 +13,7 @@ from oslo_db.sqlalchemy import session -class DBConnection(object): +class DBConnection: """Context manager class which handles a DB connection. An existing connection can be passed as a parameter. 
When diff --git a/neutron/db/models/network_segment_range.py b/neutron/db/models/network_segment_range.py index 2aa426d8099..fb261de3052 100644 --- a/neutron/db/models/network_segment_range.py +++ b/neutron/db/models/network_segment_range.py @@ -66,13 +66,13 @@ class NetworkSegmentRange(standard_attr.HasStandardAttributes, tag_support = True def __init__(self, *args, **kwargs): - super(NetworkSegmentRange, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.project_id = None if self.shared else kwargs['project_id'] is_vlan = self.network_type == constants.TYPE_VLAN self.physical_network = kwargs['physical_network'] if is_vlan else None def __repr__(self): - return "" % ( + return "".format( self.id, self.name, str(self.shared), self.project_id, self.network_type, self.physical_network, self.minimum, self.maximum) diff --git a/neutron/db/models_v2.py b/neutron/db/models_v2.py index 159226224c9..943e566dc24 100644 --- a/neutron/db/models_v2.py +++ b/neutron/db/models_v2.py @@ -30,7 +30,7 @@ from neutron.db import rbac_db_models # NOTE(ralonsoh): move to neutron_lib.db.model_base -class HasInUse(object): +class HasInUse: """NeutronBaseV2 mixin, to add the flag "in_use" to a DB model. The goal of this class is to allow users lock specific database rows with @@ -87,7 +87,7 @@ class IPAllocationPool(model_base.BASEV2, model_base.HasId): last_ip = sa.Column(sa.String(64), nullable=False) def __repr__(self): - return "%s - %s" % (self.first_ip, self.last_ip) + return "{} - {}".format(self.first_ip, self.last_ip) class IPAllocation(model_base.BASEV2): @@ -107,7 +107,7 @@ class IPAllocation(model_base.BASEV2): revises_on_change = ('port', ) -class Route(object): +class Route: """mixin of a route.""" destination = sa.Column(sa.String(64), nullable=False, primary_key=True) @@ -164,7 +164,7 @@ class Port(standard_attr.HasStandardAttributes, model_base.BASEV2, network_id=None, mac_address=None, admin_state_up=None, status=None, device_id=None, device_owner=None, fixed_ips=None, **kwargs): - super(Port, self).__init__(**kwargs) + super().__init__(**kwargs) self.id = id self.project_id = project_id or tenant_id self.name = name diff --git a/neutron/db/network_ip_availability_db.py b/neutron/db/network_ip_availability_db.py index aab5f90abc5..ff6637edf94 100644 --- a/neutron/db/network_ip_availability_db.py +++ b/neutron/db/network_ip_availability_db.py @@ -34,7 +34,7 @@ SUPPORTED_FILTERS = { SUPPORTED_FILTER_KEYS = set(SUPPORTED_FILTERS.keys()) -class IpAvailabilityMixin(object): +class IpAvailabilityMixin: """Mixin class to query for IP availability.""" # Columns common to all queries diff --git a/neutron/db/port_device_profile_db.py b/neutron/db/port_device_profile_db.py index 32e8698261a..632ecf81773 100644 --- a/neutron/db/port_device_profile_db.py +++ b/neutron/db/port_device_profile_db.py @@ -17,7 +17,7 @@ from neutron_lib.api.definitions import port_device_profile as pdp from neutron.objects.port.extensions import port_device_profile as pdp_obj -class PortDeviceProfileMixin(object): +class PortDeviceProfileMixin: """Mixin class to add device profile (Cyborg) to a port""" def _process_create_port(self, context, data, result): diff --git a/neutron/db/port_hardware_offload_type_db.py b/neutron/db/port_hardware_offload_type_db.py index 9e1975e57ca..bbaef68556c 100644 --- a/neutron/db/port_hardware_offload_type_db.py +++ b/neutron/db/port_hardware_offload_type_db.py @@ -20,7 +20,7 @@ from neutron.objects.port.extensions import port_hardware_offload_type as \ phot_obj -class 
PortHardwareOffloadTypeDbMixin(object): +class PortHardwareOffloadTypeDbMixin: """Mixin class to add hardware offload type extension to a port""" def _process_create_port(self, context, data, result): diff --git a/neutron/db/port_hints_db.py b/neutron/db/port_hints_db.py index 0b45b413fbd..826ee8e6c89 100644 --- a/neutron/db/port_hints_db.py +++ b/neutron/db/port_hints_db.py @@ -18,7 +18,7 @@ from oslo_serialization import jsonutils from neutron.objects.port.extensions import port_hints as phints_obj -class PortHintsMixin(object): +class PortHintsMixin: """Mixin class to add hints to a port""" def _process_create_port(self, context, data, result): diff --git a/neutron/db/port_numa_affinity_policy_db.py b/neutron/db/port_numa_affinity_policy_db.py index 4a457852a2b..5c0b1f04701 100644 --- a/neutron/db/port_numa_affinity_policy_db.py +++ b/neutron/db/port_numa_affinity_policy_db.py @@ -20,7 +20,7 @@ from neutron.objects.port.extensions import port_numa_affinity_policy as \ pnap_obj -class PortNumaAffinityPolicyDbMixin(object): +class PortNumaAffinityPolicyDbMixin: """Mixin class to add NUMA affinity policy to a port""" def _process_create_port(self, context, data, result): diff --git a/neutron/db/port_trusted_db.py b/neutron/db/port_trusted_db.py index 7ee07c09149..7878571a975 100644 --- a/neutron/db/port_trusted_db.py +++ b/neutron/db/port_trusted_db.py @@ -19,7 +19,7 @@ from neutron.extensions import port_trusted_vif from neutron.objects.port.extensions import port_trusted as trusted_obj -class PortTrustedDbMixin(object): +class PortTrustedDbMixin: """Mixin class to add trusted extension to a port""" @staticmethod diff --git a/neutron/db/portsecurity_db_common.py b/neutron/db/portsecurity_db_common.py index a2d49227e58..e3a383ae96c 100644 --- a/neutron/db/portsecurity_db_common.py +++ b/neutron/db/portsecurity_db_common.py @@ -19,7 +19,7 @@ from neutron.objects import network from neutron.objects.port.extensions import port_security as p_ps -class PortSecurityDbCommon(object): +class PortSecurityDbCommon: """Mixin class to add port security.""" @staticmethod diff --git a/neutron/db/quota/api.py b/neutron/db/quota/api.py index e5d482557e8..3c6f6005816 100644 --- a/neutron/db/quota/api.py +++ b/neutron/db/quota/api.py @@ -171,8 +171,8 @@ def create_reservation(context, project_id, deltas, expiration=None): return ReservationInfo(reserv_obj['id'], reserv_obj['project_id'], reserv_obj['expiration'], - dict((delta.resource, delta.amount) - for delta in reserv_obj.resource_deltas)) + {delta.resource: delta.amount + for delta in reserv_obj.resource_deltas}) @db_api.retry_if_session_inactive() @@ -183,8 +183,8 @@ def get_reservation(context, reservation_id): return ReservationInfo(reserv_obj['id'], reserv_obj['project_id'], reserv_obj['expiration'], - dict((delta.resource, delta.amount) - for delta in reserv_obj.resource_deltas)) + {delta.resource: delta.amount + for delta in reserv_obj.resource_deltas}) @utils.transaction_guard diff --git a/neutron/db/quota/driver.py b/neutron/db/quota/driver.py index 265657578b0..694a40735fa 100644 --- a/neutron/db/quota/driver.py +++ b/neutron/db/quota/driver.py @@ -48,8 +48,8 @@ class DbQuotaDriver(nlib_quota_api.QuotaDriverAPI): # Currently the project_id parameter is unused, since all projects # share the same default values. This may change in the future so # we include project ID to remain backwards compatible. 
- return dict((key, resource.default) - for key, resource in resources.items()) + return {key: resource.default + for key, resource in resources.items()} @staticmethod @db_api.retry_if_session_inactive() @@ -65,8 +65,8 @@ class DbQuotaDriver(nlib_quota_api.QuotaDriverAPI): """ # init with defaults - project_quota = dict((key, resource.default) - for key, resource in resources.items()) + project_quota = {key: resource.default + for key, resource in resources.items()} # update with project specific limits quota_objs = quota_obj.Quota.get_objects(context, @@ -134,8 +134,8 @@ class DbQuotaDriver(nlib_quota_api.QuotaDriverAPI): :return: quotas list of dict of project_id:, resourcekey1: resourcekey2: ... """ - project_default = dict((key, resource.default) - for key, resource in resources.items()) + project_default = {key: resource.default + for key, resource in resources.items()} all_project_quotas = {} @@ -184,7 +184,7 @@ class DbQuotaDriver(nlib_quota_api.QuotaDriverAPI): quotas = DbQuotaDriver.get_project_quotas( context, resources, project_id) - return dict((k, v) for k, v in quotas.items()) + return dict(quotas.items()) def _handle_expired_reservations(self, context, project_id): LOG.debug("Deleting expired reservations for project: %s", project_id) @@ -213,9 +213,9 @@ class DbQuotaDriver(nlib_quota_api.QuotaDriverAPI): # retrieved current_limits = self.get_project_quotas( context, resources, project_id) - unlimited_resources = set( - [resource for (resource, limit) in current_limits.items() - if limit <= quota_api.UNLIMITED_QUOTA]) + unlimited_resources = { + resource for (resource, limit) in current_limits.items() + if limit <= quota_api.UNLIMITED_QUOTA} # Do not even bother counting resources and calculating headroom # for resources with unlimited quota LOG.debug("Resources %s have unlimited quota limit. It is not " @@ -228,10 +228,10 @@ class DbQuotaDriver(nlib_quota_api.QuotaDriverAPI): # resource triggers multiple queries on quota usage. This should be # improved, however this is not an urgent matter as the REST API # currently only allows allocation of a resource at a time - current_usages = dict( - (resource, self.get_resource_usage(context, project_id, - resources, resource)) for - resource in requested_resources) + current_usages = { + resource: self.get_resource_usage(context, project_id, + resources, resource) for + resource in requested_resources} # Adjust for expired reservations. Apparently it is cheaper than # querying every time for active reservations and counting overall # quantity of resources reserved diff --git a/neutron/db/quota/driver_nolock.py b/neutron/db/quota/driver_nolock.py index d6957ddd6be..6e79ed61a76 100644 --- a/neutron/db/quota/driver_nolock.py +++ b/neutron/db/quota/driver_nolock.py @@ -63,9 +63,9 @@ class DbQuotaNoLockDriver(quota_driver.DbQuotaDriver): with db_api.CONTEXT_WRITER.using(context): # Filter out unlimited resources. 
limits = self.get_project_quotas(context, resources, project_id) - unlimited_resources = set( - [resource for (resource, limit) in limits.items() - if limit <= quota_api.UNLIMITED_QUOTA]) + unlimited_resources = { + resource for (resource, limit) in limits.items() + if limit <= quota_api.UNLIMITED_QUOTA} requested_resources = (set(deltas.keys()) - unlimited_resources) # Count the number of (1) used and (2) reserved resources for this diff --git a/neutron/db/rbac_db_mixin.py b/neutron/db/rbac_db_mixin.py index bc7a4c7c58c..5d7e46c9deb 100644 --- a/neutron/db/rbac_db_mixin.py +++ b/neutron/db/rbac_db_mixin.py @@ -27,7 +27,7 @@ from neutron.objects import base as base_obj from neutron.objects import rbac as rbac_obj -class RbacPluginMixin(object): +class RbacPluginMixin: """Plugin mixin that implements the RBAC DB operations.""" object_type_cache = {} diff --git a/neutron/db/securitygroups_db.py b/neutron/db/securitygroups_db.py index 8eaf7628b22..44be1a25978 100644 --- a/neutron/db/securitygroups_db.py +++ b/neutron/db/securitygroups_db.py @@ -887,7 +887,7 @@ class SecurityGroupDbMixin( def _validate_sgs_for_port(security_groups): if not security_groups: return - if not len(set(sg.stateful for sg in security_groups)) == 1: + if not len({sg.stateful for sg in security_groups}) == 1: msg = ("Cannot apply both stateful and stateless security " "groups on the same port at the same time") raise ext_sg.SecurityGroupConflict(reason=msg) @@ -1188,13 +1188,13 @@ class SecurityGroupDbMixin( sg_objs = sg_obj.SecurityGroup.get_objects(context, id=port_sg) - valid_groups = set( + valid_groups = { g.id for g in sg_objs if (context.is_admin or not tenant_id or g.tenant_id == tenant_id or sg_obj.SecurityGroup.is_shared_with_project( context, g.id, tenant_id)) - ) + } requested_groups = set(port_sg) port_sg_missing = requested_groups - valid_groups diff --git a/neutron/db/securitygroups_rpc_base.py b/neutron/db/securitygroups_rpc_base.py index c4c8f16102d..5afb64d2b7a 100644 --- a/neutron/db/securitygroups_rpc_base.py +++ b/neutron/db/securitygroups_rpc_base.py @@ -61,25 +61,22 @@ class SecurityGroupServerNotifierRpcMixin(sg_db.SecurityGroupDbMixin): self.notify_security_groups_member_updated(context, port) def create_security_group_rule(self, context, security_group_rule): - rule = super(SecurityGroupServerNotifierRpcMixin, - self).create_security_group_rule(context, - security_group_rule) + rule = super().create_security_group_rule(context, + security_group_rule) sgids = [rule['security_group_id']] self.notifier.security_groups_rule_updated(context, sgids) return rule def create_security_group_rule_bulk(self, context, security_group_rules): - rules = super(SecurityGroupServerNotifierRpcMixin, - self).create_security_group_rule_bulk_native( + rules = super().create_security_group_rule_bulk_native( context, security_group_rules) - sgids = set([r['security_group_id'] for r in rules]) + sgids = {r['security_group_id'] for r in rules} self.notifier.security_groups_rule_updated(context, list(sgids)) return rules def delete_security_group_rule(self, context, sgrid): rule = self.get_security_group_rule(context, sgrid) - super(SecurityGroupServerNotifierRpcMixin, - self).delete_security_group_rule(context, sgrid) + super().delete_security_group_rule(context, sgrid) self.notifier.security_groups_rule_updated(context, [rule['security_group_id']]) @@ -136,7 +133,7 @@ class SecurityGroupServerNotifierRpcMixin(sg_db.SecurityGroupDbMixin): self.notify_security_groups_member_updated_bulk(context, [port]) -class 
SecurityGroupInfoAPIMixin(object): +class SecurityGroupInfoAPIMixin: """API for retrieving security group info for SG agent code.""" def get_port_from_device(self, context, device): @@ -346,7 +343,7 @@ class SecurityGroupInfoAPIMixin(object): # only allow DHCP servers to talk to the appropriate IP address # to avoid getting leases that don't match the Neutron IPs prefix = '32' if ip_version == 4 else '128' - dests = ['%s/%s' % (ip, prefix) for ip in port['fixed_ips'] + dests = ['{}/{}'.format(ip, prefix) for ip in port['fixed_ips'] if netaddr.IPNetwork(ip).version == ip_version] if ip_version == 4: # v4 dhcp servers can also talk to broadcast diff --git a/neutron/db/servicetype_db.py b/neutron/db/servicetype_db.py index fea1cde4253..c2d8989664b 100644 --- a/neutron/db/servicetype_db.py +++ b/neutron/db/servicetype_db.py @@ -23,7 +23,7 @@ from neutron.services import provider_configuration as pconf LOG = logging.getLogger(__name__) -class ServiceTypeManager(object): +class ServiceTypeManager: """Manage service type objects in Neutron.""" _instance = None diff --git a/neutron/db/standardattrdescription_db.py b/neutron/db/standardattrdescription_db.py index 8578eccf15b..23cc370db5e 100644 --- a/neutron/db/standardattrdescription_db.py +++ b/neutron/db/standardattrdescription_db.py @@ -17,7 +17,7 @@ from neutron_lib.db import standard_attr @resource_extend.has_resource_extenders -class StandardAttrDescriptionMixin(object): +class StandardAttrDescriptionMixin: supported_extension_aliases = ['standard-attr-description'] @staticmethod diff --git a/neutron/db/subnet_service_type_mixin.py b/neutron/db/subnet_service_type_mixin.py index 56051e45144..1710e9bd525 100644 --- a/neutron/db/subnet_service_type_mixin.py +++ b/neutron/db/subnet_service_type_mixin.py @@ -18,7 +18,7 @@ from neutron_lib.db import resource_extend @resource_extend.has_resource_extenders -class SubnetServiceTypeMixin(object): +class SubnetServiceTypeMixin: """Mixin class to extend subnet with service type attribute""" @staticmethod diff --git a/neutron/db/uplink_status_propagation_db.py b/neutron/db/uplink_status_propagation_db.py index 528fb9b67da..8e30bed7764 100644 --- a/neutron/db/uplink_status_propagation_db.py +++ b/neutron/db/uplink_status_propagation_db.py @@ -16,7 +16,7 @@ from neutron.objects.port.extensions import uplink_status_propagation as \ usp_obj -class UplinkStatusPropagationMixin(object): +class UplinkStatusPropagationMixin: """Mixin class to add uplink propagation to a port""" def _process_create_port(self, context, data, res): diff --git a/neutron/db/vlantransparent_db.py b/neutron/db/vlantransparent_db.py index cca7a9bea8c..314787beedd 100644 --- a/neutron/db/vlantransparent_db.py +++ b/neutron/db/vlantransparent_db.py @@ -18,7 +18,7 @@ from neutron_lib.db import resource_extend @resource_extend.has_resource_extenders -class Vlantransparent_db_mixin(object): +class Vlantransparent_db_mixin: """Mixin class to add vlan transparent methods to db_base_plugin_v2.""" @staticmethod diff --git a/neutron/extensions/address_group.py b/neutron/extensions/address_group.py index d91d0a50c13..1c0432d1b0b 100644 --- a/neutron/extensions/address_group.py +++ b/neutron/extensions/address_group.py @@ -47,7 +47,7 @@ class Address_group(api_extensions.APIExtensionDescriptor): return [ex] -class AddressGroupPluginBase(object, metaclass=abc.ABCMeta): +class AddressGroupPluginBase(metaclass=abc.ABCMeta): @abc.abstractmethod def create_address_group(self, context, address_group): diff --git a/neutron/extensions/address_scope.py 
b/neutron/extensions/address_scope.py index 89db4d740b5..5e42cea45ba 100644 --- a/neutron/extensions/address_scope.py +++ b/neutron/extensions/address_scope.py @@ -44,7 +44,7 @@ class Address_scope(api_extensions.APIExtensionDescriptor): return [ex] -class AddressScopePluginBase(object, metaclass=abc.ABCMeta): +class AddressScopePluginBase(metaclass=abc.ABCMeta): @abc.abstractmethod def create_address_scope(self, context, address_scope): diff --git a/neutron/extensions/agent.py b/neutron/extensions/agent.py index 3d4cc4e5815..9d1ea44abf6 100644 --- a/neutron/extensions/agent.py +++ b/neutron/extensions/agent.py @@ -44,7 +44,7 @@ class Agent(api_extensions.APIExtensionDescriptor): return [ex] -class AgentPluginBase(object, metaclass=abc.ABCMeta): +class AgentPluginBase(metaclass=abc.ABCMeta): """REST API to operate the Agent. All of method must be in an admin context. diff --git a/neutron/extensions/availability_zone.py b/neutron/extensions/availability_zone.py index 12931ab3fbe..3ea384ebad2 100644 --- a/neutron/extensions/availability_zone.py +++ b/neutron/extensions/availability_zone.py @@ -39,7 +39,7 @@ class Availability_zone(api_extensions.APIExtensionDescriptor): return [ex] -class AvailabilityZonePluginBase(object, metaclass=abc.ABCMeta): +class AvailabilityZonePluginBase(metaclass=abc.ABCMeta): """REST API to operate the Availability Zone.""" @abc.abstractmethod diff --git a/neutron/extensions/dhcpagentscheduler.py b/neutron/extensions/dhcpagentscheduler.py index e5a5787751b..4580cec0d07 100644 --- a/neutron/extensions/dhcpagentscheduler.py +++ b/neutron/extensions/dhcpagentscheduler.py @@ -103,7 +103,7 @@ class Dhcpagentscheduler(api_extensions.APIExtensionDescriptor): return exts -class DhcpAgentSchedulerPluginBase(object, metaclass=abc.ABCMeta): +class DhcpAgentSchedulerPluginBase(metaclass=abc.ABCMeta): """REST API to operate the DHCP agent scheduler. All of method must be in an admin context. 
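# --- Illustrative sketch (not part of the Neutron diff above): in Python 3
# every class implicitly derives from object, so removing it from these
# abstract plugin bases changes neither the MRO nor how abc.ABCMeta guards
# instantiation. PluginBase and create_thing are made-up names.
import abc


class PluginBase(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def create_thing(self, context, thing):
        """Create a thing."""


# object is still in the MRO even though it is no longer written explicitly.
assert PluginBase.__mro__ == (PluginBase, object)

try:
    PluginBase()
except TypeError:
    # Abstract classes still refuse instantiation until create_thing exists.
    pass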
diff --git a/neutron/extensions/dvr.py b/neutron/extensions/dvr.py index 7c9629975b9..aa28ce61ac1 100644 --- a/neutron/extensions/dvr.py +++ b/neutron/extensions/dvr.py @@ -24,7 +24,7 @@ class Dvr(extensions.APIExtensionDescriptor): api_definition = apidef -class DVRMacAddressPluginBase(object, metaclass=abc.ABCMeta): +class DVRMacAddressPluginBase(metaclass=abc.ABCMeta): @abc.abstractmethod def get_dvr_mac_address_list(self, context): diff --git a/neutron/extensions/floatingip_pools.py b/neutron/extensions/floatingip_pools.py index 93f607c72df..8ebc8198826 100644 --- a/neutron/extensions/floatingip_pools.py +++ b/neutron/extensions/floatingip_pools.py @@ -41,7 +41,7 @@ class Floatingip_pools(api_extensions.APIExtensionDescriptor): return resources -class FloatingIPPoolPluginBase(object, metaclass=abc.ABCMeta): +class FloatingIPPoolPluginBase(metaclass=abc.ABCMeta): @abc.abstractmethod def get_floatingip_pools(self, context, filters=None, fields=None, diff --git a/neutron/extensions/l3.py b/neutron/extensions/l3.py index 8d60ca19196..bccdcfc2482 100644 --- a/neutron/extensions/l3.py +++ b/neutron/extensions/l3.py @@ -41,7 +41,7 @@ class L3(extensions.APIExtensionDescriptor): register_quota=True) -class RouterPluginBase(object, metaclass=abc.ABCMeta): +class RouterPluginBase(metaclass=abc.ABCMeta): @abc.abstractmethod def create_router(self, context, router): diff --git a/neutron/extensions/l3agentscheduler.py b/neutron/extensions/l3agentscheduler.py index 6d5ac146e3b..aeda5496187 100644 --- a/neutron/extensions/l3agentscheduler.py +++ b/neutron/extensions/l3agentscheduler.py @@ -184,7 +184,7 @@ class RouterDoesntSupportScheduling(exceptions.Conflict): message = _("Router %(router_id)s does not support agent scheduling.") -class L3AgentSchedulerPluginBase(object, metaclass=abc.ABCMeta): +class L3AgentSchedulerPluginBase(metaclass=abc.ABCMeta): """REST API to operate the l3 agent scheduler. All of method must be in an admin context. 
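# --- Illustrative sketch (not part of the Neutron diff above): the
# %-formatting to str.format()/f-string conversions in this series are
# behaviour-preserving; the rendered text is identical. The table and
# column values below are sample data for the example only.
table, column = 'l3harouternetworks', 'project_id'

old_style = 'uniq_%s0%s' % (table, column)
new_style = 'uniq_{}0{}'.format(table, column)
f_style = f'uniq_{table}0{column}'

assert old_style == new_style == f_style == 'uniq_l3harouternetworks0project_id'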
diff --git a/neutron/extensions/metering_source_and_destination_fields.py b/neutron/extensions/metering_source_and_destination_fields.py index acc8d1effeb..a6477f43059 100644 --- a/neutron/extensions/metering_source_and_destination_fields.py +++ b/neutron/extensions/metering_source_and_destination_fields.py @@ -23,8 +23,7 @@ class Metering_source_and_destination_fields( @classmethod def get_extended_resources(cls, version): - sub_resource_map = super(Metering_source_and_destination_fields, cls - ).get_extended_resources(version) + sub_resource_map = super().get_extended_resources(version) processed_sub_resource_map = {} for value in sub_resource_map.values(): diff --git a/neutron/extensions/network_availability_zone.py b/neutron/extensions/network_availability_zone.py index bf436009271..3786df79fa1 100644 --- a/neutron/extensions/network_availability_zone.py +++ b/neutron/extensions/network_availability_zone.py @@ -24,7 +24,7 @@ class Network_availability_zone(extensions.APIExtensionDescriptor): api_definition = apidef -class NetworkAvailabilityZonePluginBase(object, metaclass=abc.ABCMeta): +class NetworkAvailabilityZonePluginBase(metaclass=abc.ABCMeta): @abc.abstractmethod def get_network_availability_zones(self, network): diff --git a/neutron/extensions/router_availability_zone.py b/neutron/extensions/router_availability_zone.py index f066ddf55ed..589ba937cc3 100644 --- a/neutron/extensions/router_availability_zone.py +++ b/neutron/extensions/router_availability_zone.py @@ -24,7 +24,7 @@ class Router_availability_zone(extensions.APIExtensionDescriptor): api_definition = apidef -class RouterAvailabilityZonePluginBase(object, metaclass=abc.ABCMeta): +class RouterAvailabilityZonePluginBase(metaclass=abc.ABCMeta): @abc.abstractmethod def get_router_availability_zones(self, router): diff --git a/neutron/extensions/security_groups_default_rules.py b/neutron/extensions/security_groups_default_rules.py index 70a494b3edd..fdc4c3db79b 100644 --- a/neutron/extensions/security_groups_default_rules.py +++ b/neutron/extensions/security_groups_default_rules.py @@ -179,7 +179,7 @@ class Security_groups_default_rules(api_extensions.ExtensionDescriptor): return [ex] -class SecurityGroupDefaultRulesPluginBase(object, metaclass=abc.ABCMeta): +class SecurityGroupDefaultRulesPluginBase(metaclass=abc.ABCMeta): @abc.abstractmethod def create_default_security_group_rule(self, context, sg_rule_template): diff --git a/neutron/extensions/securitygroup.py b/neutron/extensions/securitygroup.py index d159a85e45d..e0fb124605c 100644 --- a/neutron/extensions/securitygroup.py +++ b/neutron/extensions/securitygroup.py @@ -70,7 +70,7 @@ class SecurityGroupInUse(exceptions.InUse): def __init__(self, **kwargs): if 'reason' not in kwargs: kwargs['reason'] = _("in use") - super(SecurityGroupInUse, self).__init__(**kwargs) + super().__init__(**kwargs) class SecurityGroupCannotRemoveDefault(exceptions.InUse): @@ -132,7 +132,7 @@ class SecurityGroupRuleInUse(exceptions.InUse): def __init__(self, **kwargs): if 'reason' not in kwargs: kwargs['reason'] = _("in use") - super(SecurityGroupRuleInUse, self).__init__(**kwargs) + super().__init__(**kwargs) class SecurityGroupRuleParameterConflict(exceptions.InvalidInput): @@ -344,7 +344,7 @@ class Securitygroup(api_extensions.ExtensionDescriptor): return exts def update_attributes_map(self, attributes): - super(Securitygroup, self).update_attributes_map( + super().update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) def get_extended_resources(self, version): @@ 
-358,7 +358,7 @@ class Securitygroup(api_extensions.ExtensionDescriptor): return [stdattr_ext.Standardattrdescription.get_alias()] -class SecurityGroupPluginBase(object, metaclass=abc.ABCMeta): +class SecurityGroupPluginBase(metaclass=abc.ABCMeta): @abc.abstractmethod def create_security_group(self, context, security_group): diff --git a/neutron/extensions/segment.py b/neutron/extensions/segment.py index 8d15716cd52..2f86b9ee530 100644 --- a/neutron/extensions/segment.py +++ b/neutron/extensions/segment.py @@ -43,7 +43,7 @@ class Segment(api_extensions.APIExtensionDescriptor): attr_map=attr_map)] -class SegmentPluginBase(object, metaclass=abc.ABCMeta): +class SegmentPluginBase(metaclass=abc.ABCMeta): @abc.abstractmethod def create_segment(self, context, segment): diff --git a/neutron/extensions/tagging.py b/neutron/extensions/tagging.py index fe8065239be..c43070117dd 100644 --- a/neutron/extensions/tagging.py +++ b/neutron/extensions/tagging.py @@ -101,7 +101,7 @@ def notify_tag_action(context, action, parent, parent_id, tags=None): notifier.info(context, tag_event, payload) -class TaggingController(object): +class TaggingController: def __init__(self): self.plugin = directory.get_plugin(TAG_PLUGIN_TYPE) self.supported_resources = TAG_SUPPORTED_RESOURCES @@ -142,7 +142,7 @@ class TaggingController(object): res, res_id, p_res, p_res_id = self._get_parent_resource_and_id( ctx, kwargs) target = self._get_target(ctx, res_id, p_res, p_res_id) - policy.enforce(ctx, 'get_%s_%s' % (res, TAGS), target) + policy.enforce(ctx, 'get_{}_{}'.format(res, TAGS), target) return self.plugin.get_tags(ctx, res, res_id) @_policy_init @@ -154,7 +154,7 @@ class TaggingController(object): res, res_id, p_res, p_res_id = self._get_parent_resource_and_id( ctx, kwargs) target = self._get_target(ctx, res_id, p_res, p_res_id, tag_id=id) - policy.enforce(ctx, 'get_%s_%s' % (res, TAGS), target) + policy.enforce(ctx, 'get_{}_{}'.format(res, TAGS), target) return self.plugin.get_tag(ctx, res, res_id, id) @_policy_init @@ -166,7 +166,7 @@ class TaggingController(object): res, res_id, p_res, p_res_id = self._get_parent_resource_and_id( ctx, kwargs) target = self._get_target(ctx, res_id, p_res, p_res_id) - policy.enforce(ctx, 'create_%s_%s' % (res, TAGS), target) + policy.enforce(ctx, 'create_{}_{}'.format(res, TAGS), target) notify_tag_action(ctx, 'create.start', res, res_id, body['tags']) result = self.plugin.create_tags(ctx, res, res_id, body) notify_tag_action(ctx, 'create.end', res, res_id, body['tags']) @@ -181,7 +181,7 @@ class TaggingController(object): res, res_id, p_res, p_res_id = self._get_parent_resource_and_id( ctx, kwargs) target = self._get_target(ctx, res_id, p_res, p_res_id, tag_id=id) - policy.enforce(ctx, 'update_%s_%s' % (res, TAGS), target) + policy.enforce(ctx, 'update_{}_{}'.format(res, TAGS), target) notify_tag_action(ctx, 'create.start', res, res_id, [id]) result = self.plugin.update_tag(ctx, res, res_id, id) notify_tag_action(ctx, 'create.end', res, res_id, [id]) @@ -196,7 +196,7 @@ class TaggingController(object): res, res_id, p_res, p_res_id = self._get_parent_resource_and_id( ctx, kwargs) target = self._get_target(ctx, res_id, p_res, p_res_id) - policy.enforce(ctx, 'update_%s_%s' % (res, TAGS), target) + policy.enforce(ctx, 'update_{}_{}'.format(res, TAGS), target) notify_tag_action(ctx, 'update.start', res, res_id, body['tags']) result = self.plugin.update_tags(ctx, res, res_id, body) notify_tag_action(ctx, 'update.end', res, res_id, @@ -212,7 +212,7 @@ class TaggingController(object): res, 
res_id, p_res, p_res_id = self._get_parent_resource_and_id( ctx, kwargs) target = self._get_target(ctx, res_id, p_res, p_res_id, tag_id=id) - policy.enforce(ctx, 'delete_%s_%s' % (res, TAGS), target) + policy.enforce(ctx, 'delete_{}_{}'.format(res, TAGS), target) notify_tag_action(ctx, 'delete.start', res, res_id, [id]) result = self.plugin.delete_tag(ctx, res, res_id, id) notify_tag_action(ctx, 'delete.end', res, res_id, [id]) @@ -225,7 +225,7 @@ class TaggingController(object): res, res_id, p_res, p_res_id = self._get_parent_resource_and_id( ctx, kwargs) target = self._get_target(ctx, res_id, p_res, p_res_id) - policy.enforce(ctx, 'delete_%s_%s' % (res, TAGS), target) + policy.enforce(ctx, 'delete_{}_{}'.format(res, TAGS), target) notify_tag_action(ctx, 'delete_all.start', res, res_id) result = self.plugin.delete_tags(ctx, res, res_id) notify_tag_action(ctx, 'delete_all.end', res, res_id) diff --git a/neutron/ipam/driver.py b/neutron/ipam/driver.py index 7040ab14274..5931e0e2bdd 100644 --- a/neutron/ipam/driver.py +++ b/neutron/ipam/driver.py @@ -18,7 +18,7 @@ from neutron.ipam import requests as ipam_req from neutron import manager -class Pool(object, metaclass=abc.ABCMeta): +class Pool(metaclass=abc.ABCMeta): """Interface definition for an IPAM driver. There should be an instance of the driver for every subnet pool. @@ -126,7 +126,7 @@ class Pool(object, metaclass=abc.ABCMeta): return True -class Subnet(object, metaclass=abc.ABCMeta): +class Subnet(metaclass=abc.ABCMeta): """Interface definition for an IPAM subnet A subnet would typically be associated with a network but may not be. It @@ -164,7 +164,7 @@ class Subnet(object, metaclass=abc.ABCMeta): """ -class SubnetGroup(object, metaclass=abc.ABCMeta): +class SubnetGroup(metaclass=abc.ABCMeta): """Interface definition for a filtered group of IPAM Subnets Allocates from a group of semantically equivalent subnets. 
The list of diff --git a/neutron/ipam/drivers/neutrondb_ipam/db_api.py b/neutron/ipam/drivers/neutrondb_ipam/db_api.py index d65ff504fac..a28f7e86e26 100644 --- a/neutron/ipam/drivers/neutrondb_ipam/db_api.py +++ b/neutron/ipam/drivers/neutrondb_ipam/db_api.py @@ -21,7 +21,7 @@ from neutron.objects import ipam as ipam_objs # Database operations for Neutron's DB-backed IPAM driver -class IpamSubnetManager(object): +class IpamSubnetManager: @classmethod def load_by_neutron_subnet_id(cls, context, neutron_subnet_id): diff --git a/neutron/ipam/drivers/neutrondb_ipam/db_models.py b/neutron/ipam/drivers/neutrondb_ipam/db_models.py index 09a8f39c230..fe50df9d11f 100644 --- a/neutron/ipam/drivers/neutrondb_ipam/db_models.py +++ b/neutron/ipam/drivers/neutrondb_ipam/db_models.py @@ -40,7 +40,7 @@ class IpamAllocationPool(model_base.BASEV2, model_base.HasId): last_ip = sa.Column(sa.String(64), nullable=False) def __repr__(self): - return "%s - %s" % (self.first_ip, self.last_ip) + return "{} - {}".format(self.first_ip, self.last_ip) class IpamSubnet(model_base.BASEV2, model_base.HasId): diff --git a/neutron/ipam/drivers/neutrondb_ipam/driver.py b/neutron/ipam/drivers/neutrondb_ipam/driver.py index 51b54056214..d1ae0ded05b 100644 --- a/neutron/ipam/drivers/neutrondb_ipam/driver.py +++ b/neutron/ipam/drivers/neutrondb_ipam/driver.py @@ -334,7 +334,7 @@ class NeutronDbPool(subnet_alloc.SubnetAllocator): :returns: a NeutronDbSubnet instance """ if self._subnetpool: - subnet = super(NeutronDbPool, self).allocate_subnet(subnet_request) + subnet = super().allocate_subnet(subnet_request) subnet_request = subnet.get_details() # SubnetRequest must be an instance of SpecificSubnet diff --git a/neutron/ipam/exceptions.py b/neutron/ipam/exceptions.py index d8f351ec990..0d207f2450f 100644 --- a/neutron/ipam/exceptions.py +++ b/neutron/ipam/exceptions.py @@ -79,7 +79,7 @@ class IPAllocationFailed(exceptions.NeutronException): class IpamValueInvalid(exceptions.Conflict): def __init__(self, message=None): self.message = message - super(IpamValueInvalid, self).__init__() + super().__init__() class DeferIpam(exceptions.NeutronException): diff --git a/neutron/ipam/requests.py b/neutron/ipam/requests.py index e7ed225e5a8..e4d6923dd4a 100644 --- a/neutron/ipam/requests.py +++ b/neutron/ipam/requests.py @@ -24,11 +24,11 @@ from neutron.ipam import exceptions as ipam_exc from neutron.ipam import utils as ipam_utils -class SubnetPool(object, metaclass=abc.ABCMeta): +class SubnetPool(metaclass=abc.ABCMeta): """Represents a pool of IPs available inside an address scope.""" -class SubnetRequest(object, metaclass=abc.ABCMeta): +class SubnetRequest(metaclass=abc.ABCMeta): """Carries the data needed to make a subnet request The data validated and carried by an instance of this class is the data @@ -152,7 +152,7 @@ class AnySubnetRequest(SubnetRequest): max allowed. :type prefixlen: int """ - super(AnySubnetRequest, self).__init__( + super().__init__( tenant_id=tenant_id, subnet_id=subnet_id, gateway_ip=gateway_ip, @@ -184,7 +184,7 @@ class SpecificSubnetRequest(SubnetRequest): the version of the address scope being used. 
:type subnet: netaddr.IPNetwork or convertible to one """ - super(SpecificSubnetRequest, self).__init__( + super().__init__( tenant_id=tenant_id, subnet_id=subnet_id, gateway_ip=gateway_ip, @@ -203,7 +203,7 @@ class SpecificSubnetRequest(SubnetRequest): return self._subnet_cidr.prefixlen -class AddressRequest(object, metaclass=abc.ABCMeta): +class AddressRequest(metaclass=abc.ABCMeta): """Abstract base class for address requests""" @@ -215,7 +215,7 @@ class SpecificAddressRequest(AddressRequest): :param address: The address being requested :type address: A netaddr.IPAddress or convertible to one. """ - super(SpecificAddressRequest, self).__init__() + super().__init__() self._address = netaddr.IPAddress(address) @property @@ -230,7 +230,7 @@ class BulkAddressRequest(AddressRequest): :param num_addresses: The quantity of IP addresses being requested :type num_addresses: int """ - super(BulkAddressRequest, self).__init__() + super().__init__() self._num_addresses = num_addresses @property @@ -251,7 +251,7 @@ class AutomaticAddressRequest(SpecificAddressRequest): EUI64 = 'eui64' def _generate_eui64_address(self, **kwargs): - if set(kwargs) != set(['prefix', 'mac']): + if set(kwargs) != {'prefix', 'mac'}: raise ipam_exc.AddressCalculationFailure( address_type='eui-64', reason=_('must provide exactly 2 arguments - cidr and MAC')) @@ -276,14 +276,14 @@ class AutomaticAddressRequest(SpecificAddressRequest): if not address_generator: raise ipam_exc.InvalidAddressType(address_type=address_type) address = address_generator(self, **kwargs) - super(AutomaticAddressRequest, self).__init__(address) + super().__init__(address) class RouterGatewayAddressRequest(AddressRequest): """Used to request allocating the special router gateway address.""" -class AddressRequestFactory(object): +class AddressRequestFactory: """Builds request using ip info Additional parameters(port and context) are not used in default @@ -315,7 +315,7 @@ class AddressRequestFactory(object): return AnyAddressRequest() -class SubnetRequestFactory(object): +class SubnetRequestFactory: """Builds request using subnet info""" @classmethod diff --git a/neutron/ipam/subnet_alloc.py b/neutron/ipam/subnet_alloc.py index 91cfe7850ad..61a0a8ab38b 100644 --- a/neutron/ipam/subnet_alloc.py +++ b/neutron/ipam/subnet_alloc.py @@ -39,7 +39,7 @@ class SubnetAllocator(driver.Pool): """ def __init__(self, subnetpool, context): - super(SubnetAllocator, self).__init__(subnetpool, context) + super().__init__(subnetpool, context) self._sp_helper = SubnetPoolHelper() def _lock_subnetpool(self): @@ -240,7 +240,7 @@ class IpamSubnetGroup(driver.SubnetGroup): raise ipam_exc.IpAddressGenerationFailureAllSubnets() -class SubnetPoolReader(object): +class SubnetPoolReader: '''Class to assist with reading a subnetpool, loading defaults, and inferring IP version from prefix list. 
Provides a common way of reading a stored model or a create request with default table @@ -369,7 +369,7 @@ class SubnetPoolReader(object): return [x.cidr for x in ip_set.iter_cidrs()] -class SubnetPoolHelper(object): +class SubnetPoolHelper: _PREFIX_VERSION_INFO = {4: {'max_prefixlen': constants.IPv4_BITS, 'wildcard': '0.0.0.0', diff --git a/neutron/manager.py b/neutron/manager.py index 4e22528615c..d1d114c3c91 100644 --- a/neutron/manager.py +++ b/neutron/manager.py @@ -50,7 +50,7 @@ class Manager(periodic_task.PeriodicTasks, metaclass=ManagerMeta): host = cfg.CONF.host self.host = host conf = getattr(self, "conf", cfg.CONF) - super(Manager, self).__init__(conf) + super().__init__(conf) def periodic_tasks(self, context, raise_on_error=False): self.run_periodic_tasks(context, raise_on_error=raise_on_error) @@ -89,7 +89,7 @@ def validate_pre_plugin_load(): return msg -class NeutronManager(object, metaclass=profiler.TracedMeta): +class NeutronManager(metaclass=profiler.TracedMeta): """Neutron's Manager class. Neutron's Manager class is responsible for parsing a config file and diff --git a/neutron/notifiers/batch_notifier.py b/neutron/notifiers/batch_notifier.py index 6dc729c35cd..1facdb29e1b 100644 --- a/neutron/notifiers/batch_notifier.py +++ b/neutron/notifiers/batch_notifier.py @@ -17,7 +17,7 @@ import eventlet from neutron.common import utils -class BatchNotifier(object): +class BatchNotifier: def __init__(self, batch_interval, callback): self._pending_events = eventlet.Queue() self.callback = callback diff --git a/neutron/notifiers/ironic.py b/neutron/notifiers/ironic.py index 69b702be909..48de218f270 100644 --- a/neutron/notifiers/ironic.py +++ b/neutron/notifiers/ironic.py @@ -37,7 +37,7 @@ IRONIC_CLIENT_VERSION = 1 @registry.has_registry_receivers -class Notifier(object): +class Notifier: _instance = None diff --git a/neutron/notifiers/nova.py b/neutron/notifiers/nova.py index 9cb6c3880e2..5a8d85fc586 100644 --- a/neutron/notifiers/nova.py +++ b/neutron/notifiers/nova.py @@ -57,7 +57,7 @@ _notifier_store = threading.local() @registry.has_registry_receivers -class Notifier(object): +class Notifier: _instance = None @@ -307,7 +307,7 @@ class Notifier(object): response_error = True continue if hasattr(response, 'request_ids'): - msg = "Nova event matching {}".format(response.request_ids) + msg = f"Nova event matching {response.request_ids}" else: msg = "Nova event" if code != 200: diff --git a/neutron/objects/address_group.py b/neutron/objects/address_group.py index a444c3c687d..3851764492c 100644 --- a/neutron/objects/address_group.py +++ b/neutron/objects/address_group.py @@ -86,14 +86,14 @@ class AddressAssociation(base.NeutronDbObject): @classmethod def modify_fields_to_db(cls, fields): - result = super(AddressAssociation, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) if 'address' in result: result['address'] = cls.filter_to_str(result['address']) return result @classmethod def modify_fields_from_db(cls, db_obj): - fields = super(AddressAssociation, cls).modify_fields_from_db(db_obj) + fields = super().modify_fields_from_db(db_obj) if 'address' in fields: fields['address'] = netaddr.IPNetwork(fields['address']) return fields diff --git a/neutron/objects/agent.py b/neutron/objects/agent.py index 62b7daea14b..0c801ec64a4 100644 --- a/neutron/objects/agent.py +++ b/neutron/objects/agent.py @@ -56,7 +56,7 @@ class Agent(base.NeutronDbObject): @classmethod def modify_fields_to_db(cls, fields): - result = super(Agent, cls).modify_fields_to_db(fields) + 
result = super().modify_fields_to_db(fields) if ('configurations' in result and not isinstance(result['configurations'], obj_utils.StringMatchingFilterObj)): @@ -73,7 +73,7 @@ class Agent(base.NeutronDbObject): @classmethod def modify_fields_from_db(cls, db_obj): - fields = super(Agent, cls).modify_fields_from_db(db_obj) + fields = super().modify_fields_from_db(db_obj) if 'configurations' in fields: # load string from DB, set {} if configuration is '' fields['configurations'] = ( @@ -85,7 +85,7 @@ class Agent(base.NeutronDbObject): return fields def obj_make_compatible(self, primitive, target_version): - super(Agent, self).obj_make_compatible(primitive, target_version) + super().obj_make_compatible(primitive, target_version) _target_version = versionutils.convert_version_to_tuple(target_version) if _target_version < (1, 1): primitive.pop('resources_synced', None) diff --git a/neutron/objects/base.py b/neutron/objects/base.py index acde5c1b04b..badecb3a8c7 100644 --- a/neutron/objects/base.py +++ b/neutron/objects/base.py @@ -86,7 +86,7 @@ def register_filter_hook_on_model(model, filter_name): obj_class.add_extra_filter_name(filter_name) -class LazyQueryIterator(object): +class LazyQueryIterator: def __init__(self, obj_class, lazy_query): self.obj_class = obj_class self.context = None @@ -105,7 +105,7 @@ class LazyQueryIterator(object): return item -class Pager(object): +class Pager: '''Pager class This class represents a pager object. It is consumed by get_objects to @@ -187,7 +187,7 @@ class NeutronObject(obj_base.VersionedObject, lazy_fields = set() def __init__(self, context=None, **kwargs): - super(NeutronObject, self).__init__(context, **kwargs) + super().__init__(context, **kwargs) self._load_synthetic_fields = True self.obj_set_defaults() @@ -373,7 +373,7 @@ def _guarantee_rw_subtransaction(func): class DeclarativeObject(abc.ABCMeta): def __init__(cls, name, bases, dct): - super(DeclarativeObject, cls).__init__(name, bases, dct) + super().__init__(name, bases, dct) # TODO(ralonsoh): remove once bp/keystone-v3 migration finishes. 
if 'project_id' in cls.fields: obj_extra_fields_set = set(cls.obj_extra_fields) @@ -474,7 +474,7 @@ class NeutronDbObject(NeutronObject, metaclass=DeclarativeObject): # obj_extra_fields = [] def __init__(self, *args, **kwargs): - super(NeutronDbObject, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self._captured_db_model = None @property @@ -593,7 +593,7 @@ class NeutronDbObject(NeutronObject, metaclass=DeclarativeObject): try: is_attr_nullable = self.fields[attrname].nullable except KeyError: - return super(NeutronDbObject, self).obj_load_attr(attrname) + return super().obj_load_attr(attrname) if is_attr_nullable: self[attrname] = None @@ -728,7 +728,7 @@ class NeutronDbObject(NeutronObject, metaclass=DeclarativeObject): # update revision numbers db_obj = None if cls.has_standard_attributes(): - return super(NeutronDbObject, cls).update_object( + return super().update_object( context, values, validate_filters=False, **kwargs) else: with cls.db_context_writer(context): @@ -756,7 +756,7 @@ class NeutronDbObject(NeutronObject, metaclass=DeclarativeObject): # if we have standard attributes, we will need to fetch records to # update revision numbers if cls.has_standard_attributes(): - return super(NeutronDbObject, cls).update_objects( + return super().update_objects( context, values, validate_filters=False, **kwargs) return obj_db_api.update_objects( cls, context, diff --git a/neutron/objects/db/api.py b/neutron/objects/db/api.py index 8ece717d990..7ee9b2670fe 100644 --- a/neutron/objects/db/api.py +++ b/neutron/objects/db/api.py @@ -83,10 +83,10 @@ def _safe_get_object(obj_cls, context, **kwargs): db_obj = get_object(obj_cls, context, **kwargs) if db_obj is None: - key = ", ".join(['%s=%s' % (key, value) for (key, value) + key = ", ".join(['{}={}'.format(key, value) for (key, value) in kwargs.items()]) raise n_exc.ObjectNotFound( - id="%s(%s)" % (obj_cls.db_model.__name__, key)) + id="{}({})".format(obj_cls.db_model.__name__, key)) return db_obj diff --git a/neutron/objects/flavor.py b/neutron/objects/flavor.py index f58d9dfcfd6..183cbf6bfef 100644 --- a/neutron/objects/flavor.py +++ b/neutron/objects/flavor.py @@ -54,7 +54,7 @@ class ServiceProfile(base.NeutronDbObject): } def from_db_object(self, db_obj): - super(ServiceProfile, self).from_db_object(db_obj) + super().from_db_object(db_obj) if db_obj.get('flavors', []): self.flavor_ids = { fl.flavor_id @@ -85,7 +85,7 @@ class Flavor(base.NeutronDbObject): } def from_db_object(self, db_obj): - super(Flavor, self).from_db_object(db_obj) + super().from_db_object(db_obj) if db_obj.get('service_profiles', []): self.service_profile_ids = { sp.service_profile_id diff --git a/neutron/objects/ipam.py b/neutron/objects/ipam.py index d562f3b844b..d7243f6375c 100644 --- a/neutron/objects/ipam.py +++ b/neutron/objects/ipam.py @@ -41,7 +41,7 @@ class IpamAllocationPool(base.NeutronDbObject): @classmethod def modify_fields_from_db(cls, db_obj): - result = super(IpamAllocationPool, cls).modify_fields_from_db(db_obj) + result = super().modify_fields_from_db(db_obj) if 'first_ip' in result: result['first_ip'] = netaddr.IPAddress(result['first_ip']) if 'last_ip' in result: @@ -50,7 +50,7 @@ class IpamAllocationPool(base.NeutronDbObject): @classmethod def modify_fields_to_db(cls, fields): - result = super(IpamAllocationPool, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) if 'first_ip' in result: result['first_ip'] = cls.filter_to_str(result['first_ip']) if 'last_ip' in result: @@ -75,14 +75,14 @@ class 
IpamAllocation(base.NeutronDbObject): @classmethod def modify_fields_from_db(cls, db_obj): - result = super(IpamAllocation, cls).modify_fields_from_db(db_obj) + result = super().modify_fields_from_db(db_obj) if 'ip_address' in result: result['ip_address'] = netaddr.IPAddress(result['ip_address']) return result @classmethod def modify_fields_to_db(cls, fields): - result = super(IpamAllocation, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) if 'ip_address' in result: result['ip_address'] = cls.filter_to_str(result['ip_address']) return result diff --git a/neutron/objects/local_ip.py b/neutron/objects/local_ip.py index f243a89f049..cc808728c6b 100644 --- a/neutron/objects/local_ip.py +++ b/neutron/objects/local_ip.py @@ -48,7 +48,7 @@ class LocalIP(base.NeutronDbObject): @classmethod def modify_fields_to_db(cls, fields): - result = super(LocalIP, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) if 'local_ip_address' in result: result['local_ip_address'] = cls.filter_to_str( result['local_ip_address']) @@ -56,7 +56,7 @@ class LocalIP(base.NeutronDbObject): @classmethod def modify_fields_from_db(cls, db_obj): - fields = super(LocalIP, cls).modify_fields_from_db(db_obj) + fields = super().modify_fields_from_db(db_obj) if 'local_ip_address' in fields: fields['local_ip_address'] = netaddr.IPAddress( fields['local_ip_address']) @@ -86,14 +86,14 @@ class LocalIPAssociation(base.NeutronDbObject): @classmethod def modify_fields_to_db(cls, fields): - result = super(LocalIPAssociation, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) if 'fixed_ip' in result: result['fixed_ip'] = cls.filter_to_str(result['fixed_ip']) return result @classmethod def modify_fields_from_db(cls, db_obj): - fields = super(LocalIPAssociation, cls).modify_fields_from_db(db_obj) + fields = super().modify_fields_from_db(db_obj) if 'fixed_ip' in fields: fields['fixed_ip'] = netaddr.IPAddress(fields['fixed_ip']) return fields @@ -101,10 +101,10 @@ class LocalIPAssociation(base.NeutronDbObject): def obj_load_attr(self, attrname): if attrname in ['id']: self._set_id() - super(LocalIPAssociation, self).obj_load_attr(attrname) + super().obj_load_attr(attrname) def from_db_object(self, db_obj): - super(LocalIPAssociation, self).from_db_object(db_obj) + super().from_db_object(db_obj) self._set_id() def _set_id(self): diff --git a/neutron/objects/metering.py b/neutron/objects/metering.py index 3095188dd28..acc76d0a937 100644 --- a/neutron/objects/metering.py +++ b/neutron/objects/metering.py @@ -44,7 +44,7 @@ class MeteringLabelRule(base.NeutronDbObject): @classmethod def modify_fields_from_db(cls, db_obj): - result = super(MeteringLabelRule, cls).modify_fields_from_db(db_obj) + result = super().modify_fields_from_db(db_obj) cls.ip_field_from_db(result, "remote_ip_prefix") cls.ip_field_from_db(result, "source_ip_prefix") @@ -60,7 +60,7 @@ class MeteringLabelRule(base.NeutronDbObject): @classmethod def modify_fields_to_db(cls, fields): - result = super(MeteringLabelRule, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) cls.ip_field_to_db(result, "remote_ip_prefix") cls.ip_field_to_db(result, "source_ip_prefix") diff --git a/neutron/objects/ndp_proxy.py b/neutron/objects/ndp_proxy.py index 5e5c5b53181..9460c888af5 100644 --- a/neutron/objects/ndp_proxy.py +++ b/neutron/objects/ndp_proxy.py @@ -49,7 +49,7 @@ class NDPProxy(base.NeutronDbObject): @classmethod def modify_fields_from_db(cls, db_obj): - result = 
super(NDPProxy, cls).modify_fields_from_db(db_obj) + result = super().modify_fields_from_db(db_obj) if 'ip_address' in result: result['ip_address'] = netaddr.IPAddress( result['ip_address']) @@ -57,7 +57,7 @@ class NDPProxy(base.NeutronDbObject): @classmethod def modify_fields_to_db(cls, fields): - result = super(NDPProxy, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) if 'ip_address' in result: if result['ip_address'] is not None: result['ip_address'] = cls.filter_to_str( diff --git a/neutron/objects/network.py b/neutron/objects/network.py index 111a24cf70c..0f2f5a6e6af 100644 --- a/neutron/objects/network.py +++ b/neutron/objects/network.py @@ -115,14 +115,14 @@ class NetworkSegment(base.NeutronDbObject): hosts = self.hosts if hosts is None: hosts = [] - super(NetworkSegment, self).create() + super().create() if 'hosts' in fields: self._attach_hosts(hosts) def update(self): fields = self.obj_get_changes() with self.db_context_writer(self.obj_context): - super(NetworkSegment, self).update() + super().update() if 'hosts' in fields: self._attach_hosts(fields['hosts']) @@ -140,7 +140,7 @@ class NetworkSegment(base.NeutronDbObject): def obj_load_attr(self, attrname): if attrname == 'hosts': return self._load_hosts() - super(NetworkSegment, self).obj_load_attr(attrname) + super().obj_load_attr(attrname) def _load_hosts(self, db_obj=None): if db_obj: @@ -153,7 +153,7 @@ class NetworkSegment(base.NeutronDbObject): self.obj_reset_changes(['hosts']) def from_db_object(self, db_obj): - super(NetworkSegment, self).from_db_object(db_obj) + super().from_db_object(db_obj) self._load_hosts(db_obj) @classmethod @@ -165,8 +165,8 @@ class NetworkSegment(base.NeutronDbObject): _pager.sorts = [ (field, True) for field in ('network_id', 'segment_index') ] - return super(NetworkSegment, cls).get_objects(context, _pager, - **kwargs) + return super().get_objects(context, _pager, + **kwargs) @base.NeutronObjectRegistry.register @@ -252,7 +252,7 @@ class Network(rbac_db.NeutronRbacObject): with self.db_context_writer(self.obj_context): dns_domain = self.dns_domain qos_policy_id = self.qos_policy_id - super(Network, self).create() + super().create() if 'dns_domain' in fields: self._set_dns_domain(dns_domain) if 'qos_policy_id' in fields: @@ -261,7 +261,7 @@ class Network(rbac_db.NeutronRbacObject): def update(self): fields = self.obj_get_changes() with self.db_context_writer(self.obj_context): - super(Network, self).update() + super().update() if 'dns_domain' in fields: self._set_dns_domain(fields['dns_domain']) if 'qos_policy_id' in fields: @@ -288,7 +288,7 @@ class Network(rbac_db.NeutronRbacObject): @classmethod def modify_fields_from_db(cls, db_obj): - result = super(Network, cls).modify_fields_from_db(db_obj) + result = super().modify_fields_from_db(db_obj) if az_def.AZ_HINTS in result: result[az_def.AZ_HINTS] = ( az_validator.convert_az_string_to_list( @@ -297,7 +297,7 @@ class Network(rbac_db.NeutronRbacObject): @classmethod def modify_fields_to_db(cls, fields): - result = super(Network, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) if az_def.AZ_HINTS in result: result[az_def.AZ_HINTS] = ( az_validator.convert_az_list_to_string( @@ -305,7 +305,7 @@ class Network(rbac_db.NeutronRbacObject): return result def from_db_object(self, *objs): - super(Network, self).from_db_object(*objs) + super().from_db_object(*objs) for db_obj in objs: # extract domain name if db_obj.get('dns_domain'): @@ -376,4 +376,4 @@ class 
NetworkDNSDomain(base.NeutronDbObject): id=port_id).one_or_none() if net_dns is None: return None - return super(NetworkDNSDomain, cls)._load_object(context, net_dns) + return super()._load_object(context, net_dns) diff --git a/neutron/objects/network_segment_range.py b/neutron/objects/network_segment_range.py index fd66a5e8058..d24352dace7 100644 --- a/neutron/objects/network_segment_range.py +++ b/neutron/objects/network_segment_range.py @@ -72,7 +72,7 @@ class NetworkSegmentRange(base.NeutronDbObject): } def to_dict(self, fields=None): - _dict = super(NetworkSegmentRange, self).to_dict() + _dict = super().to_dict() # extend the network segment range dict with `available` and `used` # fields _dict.update({'available': self._get_available_allocation()}) @@ -93,11 +93,11 @@ class NetworkSegmentRange(base.NeutronDbObject): def create(self): self._check_shared_project_id('create') - super(NetworkSegmentRange, self).create() + super().create() def update(self): self._check_shared_project_id('update') - super(NetworkSegmentRange, self).update() + super().update() def _get_allocation_model_details(self): model = models_map.get(self.network_type) @@ -147,8 +147,8 @@ class NetworkSegmentRange(base.NeutronDbObject): @classmethod def _build_query_segments(cls, context, model, network_type, **filters): columns = set(dict(model.__table__.columns)) - model_filters = dict((k, filters[k]) - for k in columns & set(filters.keys())) + model_filters = {k: filters[k] + for k in columns & set(filters.keys())} query = (context.session.query(model) .filter_by(allocated=False, **model_filters).distinct()) _and = and_( @@ -188,7 +188,7 @@ class NetworkSegmentRange(base.NeutronDbObject): 'physical_network' in _filters): shared_ranges.filter(cls.db_model.physical_network == _filters['physical_network']) - segment_ids = set([]) + segment_ids = set() for shared_range in shared_ranges.all(): segment_ids.update(set(range(shared_range.minimum, shared_range.maximum + 1))) diff --git a/neutron/objects/plugins/ml2/base.py b/neutron/objects/plugins/ml2/base.py index 7abb099a312..8046811b63c 100644 --- a/neutron/objects/plugins/ml2/base.py +++ b/neutron/objects/plugins/ml2/base.py @@ -26,27 +26,27 @@ class EndpointBase(base.NeutronDbObject): @classmethod def modify_fields_from_db(cls, db_obj): - result = super(EndpointBase, cls).modify_fields_from_db(db_obj) + result = super().modify_fields_from_db(db_obj) if 'ip_address' in result: result['ip_address'] = netaddr.IPAddress(result['ip_address']) return result @classmethod def modify_fields_to_db(cls, fields): - result = super(EndpointBase, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) if 'ip_address' in fields: result['ip_address'] = cls.filter_to_str(result['ip_address']) return result -class SegmentAllocation(object, metaclass=abc.ABCMeta): +class SegmentAllocation(metaclass=abc.ABCMeta): @classmethod def get_random_unallocated_segment(cls, context, **filters): with cls.db_context_reader(context): columns = set(dict(cls.db_model.__table__.columns)) - model_filters = dict((k, filters[k]) - for k in columns & set(filters.keys())) + model_filters = {k: filters[k] + for k in columns & set(filters.keys())} query = context.session.query(cls.db_model).filter_by( allocated=False, **model_filters) rand_func = n_utils.get_sql_random_method( diff --git a/neutron/objects/port/extensions/allowedaddresspairs.py b/neutron/objects/port/extensions/allowedaddresspairs.py index aabf8747774..dff4f0f5986 100644 --- 
a/neutron/objects/port/extensions/allowedaddresspairs.py +++ b/neutron/objects/port/extensions/allowedaddresspairs.py @@ -41,7 +41,7 @@ class AllowedAddressPair(base.NeutronDbObject): # custom types. @classmethod def modify_fields_to_db(cls, fields): - result = super(AllowedAddressPair, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) if 'ip_address' in result: result['ip_address'] = cls.filter_to_str(result['ip_address']) if 'mac_address' in result: @@ -52,7 +52,7 @@ class AllowedAddressPair(base.NeutronDbObject): # custom types. @classmethod def modify_fields_from_db(cls, db_obj): - fields = super(AllowedAddressPair, cls).modify_fields_from_db(db_obj) + fields = super().modify_fields_from_db(db_obj) if 'ip_address' in fields: # retain string format as stored in the database fields['ip_address'] = net_utils.AuthenticIPNetwork( diff --git a/neutron/objects/port/extensions/port_hints.py b/neutron/objects/port/extensions/port_hints.py index a9fd1c793f0..3bbfef3a0c9 100644 --- a/neutron/objects/port/extensions/port_hints.py +++ b/neutron/objects/port/extensions/port_hints.py @@ -36,7 +36,7 @@ class PortHints(base.NeutronDbObject): @classmethod def modify_fields_to_db(cls, fields): - result = super(PortHints, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) if 'hints' in result: # dump field into string, set '' if empty '{}' or None result['hints'] = ( @@ -45,7 +45,7 @@ class PortHints(base.NeutronDbObject): @classmethod def modify_fields_from_db(cls, db_obj): - fields = super(PortHints, cls).modify_fields_from_db(db_obj) + fields = super().modify_fields_from_db(db_obj) if 'hints' in fields: # load string from DB into dict, set None if hints is '' fields['hints'] = ( diff --git a/neutron/objects/port_forwarding.py b/neutron/objects/port_forwarding.py index 6372c1589b5..3a0a1d42923 100644 --- a/neutron/objects/port_forwarding.py +++ b/neutron/objects/port_forwarding.py @@ -103,7 +103,7 @@ class PortForwarding(base.NeutronDbObject): )] if ":" not in intrn_port_range: - intrn_port_range = "%s:%s" % (intrn_port_range, intrn_port_range) + intrn_port_range = "{ipr}:{ipr}".format(ipr=intrn_port_range) extrn_min, extrn_max = map(int, extrn_port_range.split(':')) intrn_min, intrn_max = map(int, intrn_port_range.split(':')) @@ -126,7 +126,7 @@ class PortForwarding(base.NeutronDbObject): def obj_load_attr(self, attrname): if attrname in ['floating_ip_address', 'router_id']: return self._load_attr_from_fip(attrname) - super(PortForwarding, self).obj_load_attr(attrname) + super().obj_load_attr(attrname) def _load_attr_from_fip(self, attrname): value = getattr(self.db_obj.floating_ip, attrname) @@ -134,7 +134,7 @@ class PortForwarding(base.NeutronDbObject): self.obj_reset_changes([attrname]) def from_db_object(self, db_obj): - super(PortForwarding, self).from_db_object(db_obj) + super().from_db_object(db_obj) self._load_attr_from_fip(attrname='router_id') self._load_attr_from_fip(attrname='floating_ip_address') @@ -206,10 +206,10 @@ class PortForwarding(base.NeutronDbObject): if not internal_port_start or not external_port_start: return - result['external_port_range'] = '%s:%s' % (external_port_start, - external_port_end) - result['internal_port_range'] = '%s:%s' % (internal_port_start, - internal_port_end) + result['external_port_range'] = '{}:{}'.format(external_port_start, + external_port_end) + result['internal_port_range'] = '{}:{}'.format(internal_port_start, + internal_port_end) @staticmethod def _modify_single_ports_from_db(result, @@ 
-228,7 +228,7 @@ class PortForwarding(base.NeutronDbObject): @classmethod def modify_fields_from_db(cls, db_obj): - result = super(PortForwarding, cls).modify_fields_from_db(db_obj) + result = super().modify_fields_from_db(db_obj) if 'internal_ip_address' in result: result['internal_ip_address'] = netaddr.IPAddress( result['internal_ip_address'], version=lib_const.IP_VERSION_4) @@ -254,7 +254,7 @@ class PortForwarding(base.NeutronDbObject): @classmethod def modify_fields_to_db(cls, fields): - result = super(PortForwarding, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) cls._modify_ports_range_to_db(result) cls._modify_single_ports_to_db(result) if 'internal_ip_address' in result: diff --git a/neutron/objects/ports.py b/neutron/objects/ports.py index 9a4b6b22260..62437dada89 100644 --- a/neutron/objects/ports.py +++ b/neutron/objects/ports.py @@ -44,7 +44,7 @@ class PortBindingBase(base.NeutronDbObject): @classmethod def modify_fields_to_db(cls, fields): - result = super(PortBindingBase, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) for field in ['profile', 'vif_details']: if field in result: # dump field into string, set '' if empty '{}' or None @@ -54,7 +54,7 @@ class PortBindingBase(base.NeutronDbObject): @classmethod def modify_fields_from_db(cls, db_obj): - fields = super(PortBindingBase, cls).modify_fields_from_db(db_obj) + fields = super().modify_fields_from_db(db_obj) if 'vif_details' in fields: # load string from DB into dict, set None if vif_details is '' fields['vif_details'] = ( @@ -190,7 +190,7 @@ class PortBindingLevel(base.NeutronDbObject): if not _pager.sorts: # (NOTE) True means ASC, False is DESC _pager.sorts = [('port_id', True), ('level', True)] - return super(PortBindingLevel, cls).get_objects( + return super().get_objects( context, _pager, validate_filters, **kwargs) def obj_make_compatible(self, primitive, target_version): @@ -225,7 +225,7 @@ class IPAllocation(base.NeutronDbObject): # custom types. @classmethod def modify_fields_to_db(cls, fields): - result = super(IPAllocation, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) if 'ip_address' in result: result['ip_address'] = cls.filter_to_str(result['ip_address']) return result @@ -234,7 +234,7 @@ class IPAllocation(base.NeutronDbObject): # custom types. @classmethod def modify_fields_from_db(cls, db_obj): - fields = super(IPAllocation, cls).modify_fields_from_db(db_obj) + fields = super().modify_fields_from_db(db_obj) if 'ip_address' in fields: fields['ip_address'] = netaddr.IPAddress(fields['ip_address']) return fields @@ -256,7 +256,7 @@ class IPAllocation(base.NeutronDbObject): filter(models_v2.Port.device_owner. 
in_(device_owner)).first()) if exclude and alloc_db: - return super(IPAllocation, cls)._load_object(context, alloc_db) + return super()._load_object(context, alloc_db) if alloc_db: return True @@ -265,7 +265,7 @@ class IPAllocation(base.NeutronDbObject): allocs = context.session.query(models_v2.IPAllocation).filter_by( subnet_id=subnet_id).all() for alloc in allocs: - alloc_obj = super(IPAllocation, cls)._load_object(context, alloc) + alloc_obj = super()._load_object(context, alloc) alloc_obj.delete() @classmethod @@ -439,7 +439,7 @@ class Port(base.NeutronDbObject): if sg_ids is None: sg_ids = set() qos_policy_id = self.qos_policy_id - super(Port, self).create() + super().create() if 'security_group_ids' in fields: self._attach_security_groups(sg_ids) if 'qos_policy_id' in fields: @@ -448,7 +448,7 @@ class Port(base.NeutronDbObject): def update(self): fields = self.obj_get_changes() with self.db_context_writer(self.obj_context): - super(Port, self).update() + super().update() if 'security_group_ids' in fields: self._attach_security_groups(fields['security_group_ids']) if 'qos_policy_id' in fields: @@ -493,9 +493,9 @@ class Port(base.NeutronDbObject): kwargs['id'] = list(set(port_ids) & set(ports_with_sg)) else: kwargs['id'] = ports_with_sg - port_array = super(Port, cls).get_objects(context, _pager, - validate_filters, - **kwargs) + port_array = super().get_objects(context, _pager, + validate_filters, + **kwargs) sg_count = len(security_group_ids) if security_group_ids else 0 LOG.debug("Time-cost: Fetching %(port_count)s ports in %(sg_count)s " "security groups", @@ -527,7 +527,7 @@ class Port(base.NeutronDbObject): @classmethod def modify_fields_to_db(cls, fields): - result = super(Port, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) # TODO(rossella_s): get rid of it once we switch the db model to using # custom types. @@ -543,7 +543,7 @@ class Port(base.NeutronDbObject): @classmethod def modify_fields_from_db(cls, db_obj): - fields = super(Port, cls).modify_fields_from_db(db_obj) + fields = super().modify_fields_from_db(db_obj) # TODO(rossella_s): get rid of it once we switch the db model to using # custom types. 
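The object hunks above and below follow one recurring pattern: a modify_fields_to_db() / modify_fields_from_db() override that delegates to the now zero-argument super() and then converts between the string form stored in the database and a netaddr value. A minimal, self-contained sketch of that pattern, assuming the netaddr package is installed (the class and field names are illustrative, not Neutron code):

import netaddr


class BaseDbObjectSketch:
    @classmethod
    def modify_fields_from_db(cls, db_obj):
        # Stand-in for the real base object: copy the DB row into a dict.
        return dict(db_obj)

    @classmethod
    def modify_fields_to_db(cls, fields):
        return dict(fields)


class AddressObjectSketch(BaseDbObjectSketch):
    @classmethod
    def modify_fields_from_db(cls, db_obj):
        # Zero-argument super() works in classmethods too; the compiler
        # supplies the enclosing class, so super(AddressObjectSketch, cls)
        # is no longer needed.
        fields = super().modify_fields_from_db(db_obj)
        if 'ip_address' in fields:
            fields['ip_address'] = netaddr.IPAddress(fields['ip_address'])
        return fields

    @classmethod
    def modify_fields_to_db(cls, fields):
        result = super().modify_fields_to_db(fields)
        if 'ip_address' in result:
            # Store the plain string representation in the database.
            result['ip_address'] = str(result['ip_address'])
        return result


row = {'id': 'p1', 'ip_address': '10.0.0.1'}
fields = AddressObjectSketch.modify_fields_from_db(row)
assert fields['ip_address'] == netaddr.IPAddress('10.0.0.1')
assert AddressObjectSketch.modify_fields_to_db(fields) == row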
@@ -560,7 +560,7 @@ class Port(base.NeutronDbObject): return fields def from_db_object(self, db_obj): - super(Port, self).from_db_object(db_obj) + super().from_db_object(db_obj) # extract security group bindings if db_obj.get('security_groups', []): self.security_group_ids = { diff --git a/neutron/objects/qos/binding.py b/neutron/objects/qos/binding.py index ef302f10af6..581ce90a6be 100644 --- a/neutron/objects/qos/binding.py +++ b/neutron/objects/qos/binding.py @@ -26,7 +26,7 @@ from neutron.db.qos import models as qos_db_model from neutron.objects import base -class _QosPolicyBindingMixin(object, metaclass=abc.ABCMeta): +class _QosPolicyBindingMixin(metaclass=abc.ABCMeta): _bound_model_id = None diff --git a/neutron/objects/qos/policy.py b/neutron/objects/qos/policy.py index 955e1ef93c2..f62b2efdc59 100644 --- a/neutron/objects/qos/policy.py +++ b/neutron/objects/qos/policy.py @@ -99,7 +99,7 @@ class QosPolicy(rbac_db.NeutronRbacObject): return self._reload_rules() elif attrname == 'is_default': return self._reload_is_default() - return super(QosPolicy, self).obj_load_attr(attrname) + return super().obj_load_attr(attrname) def _reload_rules(self): rules = rule_obj_impl.get_rules(self, self.obj_context, self.id) @@ -126,7 +126,7 @@ class QosPolicy(rbac_db.NeutronRbacObject): rule_id=rule_id) def to_dict(self): - _dict = super(QosPolicy, self).to_dict() + _dict = super().to_dict() resource_extend.apply_funcs(qos_def.POLICIES, _dict, self.db_obj) return _dict @@ -150,7 +150,7 @@ class QosPolicy(rbac_db.NeutronRbacObject): @classmethod def get_object(cls, context, **kwargs): - policy_obj = super(QosPolicy, cls).get_object(context, **kwargs) + policy_obj = super().get_object(context, **kwargs) if not policy_obj: return @@ -161,9 +161,9 @@ class QosPolicy(rbac_db.NeutronRbacObject): @classmethod def get_objects(cls, context, _pager=None, validate_filters=True, **kwargs): - objs = super(QosPolicy, cls).get_objects(context, _pager, - validate_filters, - **kwargs) + objs = super().get_objects(context, _pager, + validate_filters, + **kwargs) result = [] for obj in objs: obj.obj_load_attr('rules') @@ -203,7 +203,7 @@ class QosPolicy(rbac_db.NeutronRbacObject): # TODO(QoS): Consider extending base to trigger registered methods for us def create(self): with self.db_context_writer(self.obj_context): - super(QosPolicy, self).create() + super().create() if self.is_default: self.set_default() self.obj_load_attr('rules') @@ -215,7 +215,7 @@ class QosPolicy(rbac_db.NeutronRbacObject): self.set_default() else: self.unset_default() - super(QosPolicy, self).update() + super().update() def delete(self): with self.db_context_writer(self.obj_context): @@ -230,7 +230,7 @@ class QosPolicy(rbac_db.NeutronRbacObject): object_type=object_type, object_id=binding_obj[0]['%s_id' % object_type]) - super(QosPolicy, self).delete() + super().delete() def attach_network(self, network_id): network_binding = {'policy_id': self.id, diff --git a/neutron/objects/qos/rule.py b/neutron/objects/qos/rule.py index d5a5ce7a179..dc61f899faf 100644 --- a/neutron/objects/qos/rule.py +++ b/neutron/objects/qos/rule.py @@ -88,7 +88,7 @@ class QosRule(base.NeutronDbObject, metaclass=abc.ABCMeta): return True def to_dict(self): - dict_ = super(QosRule, self).to_dict() + dict_ = super().to_dict() dict_['type'] = self.rule_type return dict_ diff --git a/neutron/objects/qos/rule_type.py b/neutron/objects/qos/rule_type.py index c8d44b43fe1..302e8c49274 100644 --- a/neutron/objects/qos/rule_type.py +++ b/neutron/objects/qos/rule_type.py @@ -24,7 
+24,7 @@ class RuleTypeField(obj_fields.BaseEnumField): def __init__(self, **kwargs): self.AUTO_TYPE = obj_fields.Enum( valid_values=qos_constants.VALID_RULE_TYPES) - super(RuleTypeField, self).__init__(**kwargs) + super().__init__(**kwargs) @base.NeutronObjectRegistry.register diff --git a/neutron/objects/quota.py b/neutron/objects/quota.py index 3ae157c6e06..c7a5ec3faac 100644 --- a/neutron/objects/quota.py +++ b/neutron/objects/quota.py @@ -61,7 +61,7 @@ class Reservation(base.NeutronDbObject): def create(self): deltas = self.resource_deltas with self.db_context_writer(self.obj_context): - super(Reservation, self).create() + super().create() if deltas: for delta in deltas: delta.reservation_id = self.id @@ -104,8 +104,8 @@ class Reservation(base.NeutronDbObject): exp_expr)).group_by( models.ResourceDelta.resource, models.Reservation.expiration) - return dict((resource, total_reserved) - for (resource, exp, total_reserved) in resv_query) + return {resource: total_reserved + for (resource, exp, total_reserved) in resv_query} @base.NeutronObjectRegistry.register diff --git a/neutron/objects/rbac_db.py b/neutron/objects/rbac_db.py index 4270f9a381e..4b6cdf7cb11 100644 --- a/neutron/objects/rbac_db.py +++ b/neutron/objects/rbac_db.py @@ -83,8 +83,7 @@ class RbacNeutronDbObjectMixin(rbac_db_mixin.RbacPluginMixin, @classmethod def is_accessible(cls, context, db_obj): - return (super( - RbacNeutronDbObjectMixin, cls).is_accessible(context, db_obj) or + return (super().is_accessible(context, db_obj) or cls.is_shared_with_project(context, db_obj.id, context.project_id)) @@ -116,7 +115,7 @@ class RbacNeutronDbObjectMixin(rbac_db_mixin.RbacPluginMixin, def raise_policy_in_use(): raise ext_rbac.RbacPolicyInUse( object_id=obj_id, - details='project_id={}'.format(target_project)) + details=f'project_id={target_project}') if target_project != '*': # if there is a wildcard rule, we can return early because it @@ -250,12 +249,12 @@ class RbacNeutronDbObjectMixin(rbac_db_mixin.RbacPluginMixin, def from_db_object(self, db_obj): self._load_shared(db_obj) - super(RbacNeutronDbObjectMixin, self).from_db_object(db_obj) + super().from_db_object(db_obj) def obj_load_attr(self, attrname): if attrname == 'shared': return self._load_shared() - super(RbacNeutronDbObjectMixin, self).obj_load_attr(attrname) + super().obj_load_attr(attrname) def _load_shared(self, db_obj=None): # Do not override 'shared' attribute on create() or update() diff --git a/neutron/objects/router.py b/neutron/objects/router.py index a94589bd94b..f6aa2c9824a 100644 --- a/neutron/objects/router.py +++ b/neutron/objects/router.py @@ -52,7 +52,7 @@ class RouterRoute(base.NeutronDbObject): @classmethod def modify_fields_from_db(cls, db_obj): - result = super(RouterRoute, cls).modify_fields_from_db(db_obj) + result = super().modify_fields_from_db(db_obj) if 'destination' in result: result['destination'] = net_utils.AuthenticIPNetwork( result['destination']) @@ -62,7 +62,7 @@ class RouterRoute(base.NeutronDbObject): @classmethod def modify_fields_to_db(cls, fields): - result = super(RouterRoute, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) if 'destination' in result: result['destination'] = cls.filter_to_str(result['destination']) if 'nexthop' in result: @@ -96,7 +96,7 @@ class RouterExtraAttributes(base.NeutronDbObject): @classmethod def modify_fields_from_db(cls, db_obj): - result = super(RouterExtraAttributes, cls).modify_fields_from_db( + result = super().modify_fields_from_db( db_obj) if az_def.AZ_HINTS in 
result: result[az_def.AZ_HINTS] = ( @@ -106,7 +106,7 @@ class RouterExtraAttributes(base.NeutronDbObject): @classmethod def modify_fields_to_db(cls, fields): - result = super(RouterExtraAttributes, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) if az_def.AZ_HINTS in result: result[az_def.AZ_HINTS] = ( az_validator.convert_az_list_to_string( @@ -208,7 +208,7 @@ class DVRMacAddress(base.NeutronDbObject): @classmethod def modify_fields_from_db(cls, db_obj): - fields = super(DVRMacAddress, cls).modify_fields_from_db(db_obj) + fields = super().modify_fields_from_db(db_obj) if 'mac_address' in fields: # NOTE(tonytan4ever): Here uses AuthenticEUI to retain the format # passed from API. @@ -218,7 +218,7 @@ class DVRMacAddress(base.NeutronDbObject): @classmethod def modify_fields_to_db(cls, fields): - result = super(DVRMacAddress, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) if 'mac_address' in fields: result['mac_address'] = cls.filter_to_str(result['mac_address']) return result @@ -353,7 +353,7 @@ class FloatingIP(base.NeutronDbObject): @classmethod def modify_fields_from_db(cls, db_obj): - result = super(FloatingIP, cls).modify_fields_from_db(db_obj) + result = super().modify_fields_from_db(db_obj) if 'fixed_ip_address' in result: result['fixed_ip_address'] = netaddr.IPAddress( result['fixed_ip_address']) @@ -364,7 +364,7 @@ class FloatingIP(base.NeutronDbObject): @classmethod def modify_fields_to_db(cls, fields): - result = super(FloatingIP, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) if 'fixed_ip_address' in result: if result['fixed_ip_address'] is not None: result['fixed_ip_address'] = cls.filter_to_str( @@ -389,19 +389,19 @@ class FloatingIP(base.NeutronDbObject): fields = self.obj_get_changes() with self.db_context_writer(self.obj_context): qos_policy_id = self.qos_policy_id - super(FloatingIP, self).create() + super().create() if 'qos_policy_id' in fields: self._attach_qos_policy(qos_policy_id) def update(self): fields = self.obj_get_changes() with self.db_context_writer(self.obj_context): - super(FloatingIP, self).update() + super().update() if 'qos_policy_id' in fields: self._attach_qos_policy(fields['qos_policy_id']) def from_db_object(self, db_obj): - super(FloatingIP, self).from_db_object(db_obj) + super().from_db_object(db_obj) fields_to_change = [] if db_obj.get('qos_policy_binding'): self.qos_policy_id = db_obj.qos_policy_binding.policy_id diff --git a/neutron/objects/securitygroup.py b/neutron/objects/securitygroup.py index 4ce210d5fb4..cd9cfb78f5b 100644 --- a/neutron/objects/securitygroup.py +++ b/neutron/objects/securitygroup.py @@ -70,13 +70,13 @@ class SecurityGroup(rbac_db.NeutronRbacObject): extra_filter_names = {'is_default'} - lazy_fields = set(['rules']) + lazy_fields = {'rules'} def create(self): # save is_default before super() resets it to False is_default = self.is_default with self.db_context_writer(self.obj_context): - super(SecurityGroup, self).create() + super().create() if is_default: default_group = DefaultSecurityGroup( self.obj_context, @@ -87,7 +87,7 @@ class SecurityGroup(rbac_db.NeutronRbacObject): self.obj_reset_changes(['is_default']) def from_db_object(self, db_obj): - super(SecurityGroup, self).from_db_object(db_obj) + super().from_db_object(db_obj) if self._load_synthetic_fields: setattr(self, 'is_default', bool(db_obj.get('default_security_group'))) @@ -95,7 +95,7 @@ class SecurityGroup(rbac_db.NeutronRbacObject): @classmethod def get_sg_by_id(cls, 
context, sg_id): - return super(SecurityGroup, cls).get_object(context, id=sg_id) + return super().get_object(context, id=sg_id) def obj_make_compatible(self, primitive, target_version): _target_version = versionutils.convert_version_to_tuple(target_version) @@ -207,7 +207,7 @@ class SecurityGroupRule(base.NeutronDbObject): # custom types. @classmethod def modify_fields_to_db(cls, fields): - result = super(SecurityGroupRule, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) remote_ip_prefix = result.get('remote_ip_prefix') if remote_ip_prefix: result['remote_ip_prefix'] = cls.filter_to_str(remote_ip_prefix) @@ -227,7 +227,7 @@ class SecurityGroupRule(base.NeutronDbObject): self.obj_reset_changes(['normalized_cidr']) def from_db_object(self, db_obj): - super(SecurityGroupRule, self).from_db_object(db_obj) + super().from_db_object(db_obj) self._load_normalized_cidr(db_obj) if self._load_synthetic_fields: setattr(self, 'belongs_to_default_sg', @@ -237,13 +237,13 @@ class SecurityGroupRule(base.NeutronDbObject): def obj_load_attr(self, attrname): if attrname == 'normalized_cidr': return self._load_normalized_cidr() - super(SecurityGroupRule, self).obj_load_attr(attrname) + super().obj_load_attr(attrname) # TODO(sayalilunkad): get rid of it once we switch the db model to using # custom types. @classmethod def modify_fields_from_db(cls, db_obj): - fields = super(SecurityGroupRule, cls).modify_fields_from_db(db_obj) + fields = super().modify_fields_from_db(db_obj) if 'remote_ip_prefix' in fields: fields['remote_ip_prefix'] = ( net_utils.AuthenticIPNetwork(fields['remote_ip_prefix'])) diff --git a/neutron/objects/securitygroup_default_rules.py b/neutron/objects/securitygroup_default_rules.py index 1ed349eaa5f..ab0ae8ef2f3 100644 --- a/neutron/objects/securitygroup_default_rules.py +++ b/neutron/objects/securitygroup_default_rules.py @@ -41,8 +41,7 @@ class SecurityGroupDefaultRule(base.NeutronDbObject): @classmethod def modify_fields_to_db(cls, fields): - result = super(SecurityGroupDefaultRule, - cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) remote_ip_prefix = result.get('remote_ip_prefix') if remote_ip_prefix: result['remote_ip_prefix'] = cls.filter_to_str(remote_ip_prefix) @@ -50,8 +49,7 @@ class SecurityGroupDefaultRule(base.NeutronDbObject): @classmethod def modify_fields_from_db(cls, db_obj): - fields = super(SecurityGroupDefaultRule, - cls).modify_fields_from_db(db_obj) + fields = super().modify_fields_from_db(db_obj) if 'remote_ip_prefix' in fields: fields['remote_ip_prefix'] = ( net_utils.AuthenticIPNetwork(fields['remote_ip_prefix'])) diff --git a/neutron/objects/subnet.py b/neutron/objects/subnet.py index 36caed24779..54e3cd97962 100644 --- a/neutron/objects/subnet.py +++ b/neutron/objects/subnet.py @@ -67,9 +67,9 @@ class DNSNameServer(base.NeutronDbObject): if not _pager.sorts: # (NOTE) True means ASC, False is DESC _pager.sorts = [('order', True)] - return super(DNSNameServer, cls).get_objects(context, _pager, - validate_filters, - **kwargs) + return super().get_objects(context, _pager, + validate_filters, + **kwargs) @base.NeutronObjectRegistry.register @@ -92,7 +92,7 @@ class Route(base.NeutronDbObject): @classmethod def modify_fields_from_db(cls, db_obj): # TODO(korzen) remove this method when IP and CIDR decorator ready - result = super(Route, cls).modify_fields_from_db(db_obj) + result = super().modify_fields_from_db(db_obj) if 'destination' in result: result['destination'] = net_utils.AuthenticIPNetwork( 
result['destination']) @@ -103,7 +103,7 @@ class Route(base.NeutronDbObject): @classmethod def modify_fields_to_db(cls, fields): # TODO(korzen) remove this method when IP and CIDR decorator ready - result = super(Route, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) if 'destination' in result: result['destination'] = cls.filter_to_str(result['destination']) if 'nexthop' in fields: @@ -137,7 +137,7 @@ class IPAllocationPool(base.NeutronDbObject): @classmethod def modify_fields_from_db(cls, db_obj): # TODO(korzen) remove this method when IP and CIDR decorator ready - result = super(IPAllocationPool, cls).modify_fields_from_db(db_obj) + result = super().modify_fields_from_db(db_obj) if 'start' in result: result['start'] = netaddr.IPAddress(result['start']) if 'end' in result: @@ -147,7 +147,7 @@ class IPAllocationPool(base.NeutronDbObject): @classmethod def modify_fields_to_db(cls, fields): # TODO(korzen) remove this method when IP and CIDR decorator ready - result = super(IPAllocationPool, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) if 'first_ip' in result: result['first_ip'] = cls.filter_to_str(result['first_ip']) if 'last_ip' in result: @@ -253,7 +253,7 @@ class Subnet(base.NeutronDbObject): } def __init__(self, context=None, **kwargs): - super(Subnet, self).__init__(context, **kwargs) + super().__init__(context, **kwargs) self.add_extra_filter_name('shared') def obj_load_attr(self, attrname): @@ -265,7 +265,7 @@ class Subnet(base.NeutronDbObject): return self._load_service_types() if attrname == 'external': return self._load_external() - super(Subnet, self).obj_load_attr(attrname) + super().obj_load_attr(attrname) def _load_dns_publish_fixed_ip(self, db_obj=None): if db_obj: @@ -337,7 +337,7 @@ class Subnet(base.NeutronDbObject): return subnets def from_db_object(self, db_obj): - super(Subnet, self).from_db_object(db_obj) + super().from_db_object(db_obj) self._load_dns_publish_fixed_ip(db_obj) self._load_shared(db_obj) self._load_service_types(db_obj) @@ -346,7 +346,7 @@ class Subnet(base.NeutronDbObject): @classmethod def modify_fields_from_db(cls, db_obj): # TODO(korzen) remove this method when IP and CIDR decorator ready - result = super(Subnet, cls).modify_fields_from_db(db_obj) + result = super().modify_fields_from_db(db_obj) if 'cidr' in result: result['cidr'] = net_utils.AuthenticIPNetwork(result['cidr']) if 'gateway_ip' in result and result['gateway_ip'] is not None: @@ -356,7 +356,7 @@ class Subnet(base.NeutronDbObject): @classmethod def modify_fields_to_db(cls, fields): # TODO(korzen) remove this method when IP and CIDR decorator ready - result = super(Subnet, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) if 'cidr' in result: result['cidr'] = cls.filter_to_str(result['cidr']) if 'gateway_ip' in result and result['gateway_ip'] is not None: @@ -605,7 +605,7 @@ class NetworkSubnetLock(base.NeutronDbObject): @classmethod def lock_subnet(cls, context, network_id, subnet_id): - subnet_lock = super(NetworkSubnetLock, cls).get_object( + subnet_lock = super().get_object( context, network_id=network_id) if subnet_lock: subnet_lock.subnet_id = subnet_id diff --git a/neutron/objects/subnetpool.py b/neutron/objects/subnetpool.py index e011ae19938..81da5264951 100644 --- a/neutron/objects/subnetpool.py +++ b/neutron/objects/subnetpool.py @@ -72,7 +72,7 @@ class SubnetPool(rbac_db.NeutronRbacObject): synthetic_fields = ['prefixes'] def from_db_object(self, db_obj): - super(SubnetPool, 
self).from_db_object(db_obj) + super().from_db_object(db_obj) self.prefixes = [] self.prefixes = [ prefix.cidr @@ -94,7 +94,7 @@ class SubnetPool(rbac_db.NeutronRbacObject): fields = self.obj_get_changes() with self.db_context_writer(self.obj_context): prefixes = self.prefixes - super(SubnetPool, self).create() + super().create() if 'prefixes' in fields: self._attach_prefixes(prefixes) @@ -102,7 +102,7 @@ class SubnetPool(rbac_db.NeutronRbacObject): def update(self): fields = self.obj_get_changes() with self.db_context_writer(self.obj_context): - super(SubnetPool, self).update() + super().update() if 'prefixes' in fields: self._attach_prefixes(fields['prefixes']) @@ -166,7 +166,7 @@ class SubnetPoolPrefix(base.NeutronDbObject): # custom type @classmethod def modify_fields_to_db(cls, fields): - result = super(SubnetPoolPrefix, cls).modify_fields_to_db(fields) + result = super().modify_fields_to_db(fields) if 'cidr' in result: result['cidr'] = cls.filter_to_str(result['cidr']) return result @@ -175,7 +175,7 @@ class SubnetPoolPrefix(base.NeutronDbObject): # custom type @classmethod def modify_fields_from_db(cls, db_obj): - fields = super(SubnetPoolPrefix, cls).modify_fields_from_db(db_obj) + fields = super().modify_fields_from_db(db_obj) if 'cidr' in fields: fields['cidr'] = netaddr.IPNetwork(fields['cidr']) return fields diff --git a/neutron/objects/trunk.py b/neutron/objects/trunk.py index 84272f91d1c..b0384ab284e 100644 --- a/neutron/objects/trunk.py +++ b/neutron/objects/trunk.py @@ -46,7 +46,7 @@ class SubPort(base.NeutronDbObject): fields_no_update = ['segmentation_type', 'segmentation_id', 'trunk_id'] def to_dict(self): - _dict = super(SubPort, self).to_dict() + _dict = super().to_dict() # trunk_id is redundant in the subport dict. _dict.pop('trunk_id') return _dict @@ -54,7 +54,7 @@ class SubPort(base.NeutronDbObject): def create(self): try: with self.db_context_writer(self.obj_context): - super(SubPort, self).create() + super().create() except o_db_exc.DBReferenceError as ex: if ex.key_table is None: # NOTE(ivc): 'key_table' is provided by 'oslo.db' [1] @@ -112,7 +112,7 @@ class Trunk(base.NeutronDbObject): sub_ports = self.sub_ports try: - super(Trunk, self).create() + super().create() except o_db_exc.DBReferenceError: raise n_exc.PortNotFound(port_id=self.port_id) @@ -125,10 +125,10 @@ class Trunk(base.NeutronDbObject): def update(self, **kwargs): self.update_fields(kwargs) - super(Trunk, self).update() + super().update() def to_dict(self): - _dict = super(Trunk, self).to_dict() + _dict = super().to_dict() resource_extend.apply_funcs(trunk_def.TRUNKS, _dict, self.db_obj) return _dict diff --git a/neutron/pecan_wsgi/controllers/extensions.py b/neutron/pecan_wsgi/controllers/extensions.py index e95b751d4c3..2dc369ed97d 100644 --- a/neutron/pecan_wsgi/controllers/extensions.py +++ b/neutron/pecan_wsgi/controllers/extensions.py @@ -19,7 +19,7 @@ from neutron.api import extensions from neutron.pecan_wsgi.controllers import utils -class ExtensionsController(object): +class ExtensionsController: @utils.expose() def _lookup(self, alias, *remainder): @@ -43,7 +43,7 @@ class ExtensionsController(object): pecan.abort(404) -class ExtensionController(object): +class ExtensionController: def __init__(self, alias): self.alias = alias diff --git a/neutron/pecan_wsgi/controllers/quota.py b/neutron/pecan_wsgi/controllers/quota.py index d0a70733e23..28df4b3a6ae 100644 --- a/neutron/pecan_wsgi/controllers/quota.py +++ b/neutron/pecan_wsgi/controllers/quota.py @@ -41,7 +41,7 @@ class 
QuotasController(utils.NeutronPecanController): self._driver = importutils.import_class( cfg.CONF.QUOTAS.quota_driver ) - super(QuotasController, self).__init__( + super().__init__( "%ss" % RESOURCE_NAME, RESOURCE_NAME) def _check_admin(self, context, @@ -79,7 +79,7 @@ class QuotaController(utils.NeutronPecanController): self._driver = _driver self._tenant_id = tenant_id - super(QuotaController, self).__init__( + super().__init__( "%ss" % RESOURCE_NAME, RESOURCE_NAME) # Ensure limits for all registered resources are returned diff --git a/neutron/pecan_wsgi/controllers/resource.py b/neutron/pecan_wsgi/controllers/resource.py index cebb03548e9..daa6b1c6ac6 100644 --- a/neutron/pecan_wsgi/controllers/resource.py +++ b/neutron/pecan_wsgi/controllers/resource.py @@ -29,10 +29,10 @@ class ItemController(utils.NeutronPecanController): def __init__(self, resource, item, plugin=None, resource_info=None, parent_resource=None, member_actions=None): - super(ItemController, self).__init__(None, resource, plugin=plugin, - resource_info=resource_info, - parent_resource=parent_resource, - member_actions=member_actions) + super().__init__(None, resource, plugin=plugin, + resource_info=resource_info, + parent_resource=parent_resource, + member_actions=member_actions) self.item = item @utils.expose(generic=True) @@ -199,7 +199,7 @@ class MemberActionController(ItemController): def __init__(self, resource, item, parent_controller, plugin=None, resource_info=None, show_action=None, update_action=None): - super(MemberActionController, self).__init__( + super().__init__( resource, item, plugin=plugin, resource_info=resource_info) self._show_action = show_action self._update_action = update_action @@ -236,4 +236,4 @@ class MemberActionController(ItemController): @utils.when(index, method='PATCH') @utils.when(index, method='DELETE') def not_supported(self): - return super(MemberActionController, self).not_supported() + return super().not_supported() diff --git a/neutron/pecan_wsgi/controllers/root.py b/neutron/pecan_wsgi/controllers/root.py index f360aa55d1c..a9523402ed3 100644 --- a/neutron/pecan_wsgi/controllers/root.py +++ b/neutron/pecan_wsgi/controllers/root.py @@ -54,7 +54,7 @@ def _get_version_info(): return _VERSION_INFO.values() -class RootController(object): +class RootController: @utils.expose(generic=True) def index(self): @@ -77,7 +77,7 @@ class RootController(object): pecan.abort(405) -class V2Controller(object): +class V2Controller: # Same data structure as neutron.api.versions.Versions for API backward # compatibility diff --git a/neutron/pecan_wsgi/controllers/utils.py b/neutron/pecan_wsgi/controllers/utils.py index 137a4a68d06..0ecf5db9912 100644 --- a/neutron/pecan_wsgi/controllers/utils.py +++ b/neutron/pecan_wsgi/controllers/utils.py @@ -33,7 +33,7 @@ from neutron_lib import exceptions LOG = logging.getLogger(__name__) -class Fakecode(object): +class Fakecode: co_varnames = () @@ -110,7 +110,7 @@ def _set_del_code(f): return wrapped -class NeutronPecanController(object): +class NeutronPecanController: LIST = 'list' SHOW = 'show' @@ -132,9 +132,9 @@ class NeutronPecanController(object): # Controllers for some resources that are not mapped to anything in # RESOURCE_ATTRIBUTE_MAP will not have anything in _resource_info if self.resource_info: - self._mandatory_fields = set([field for (field, data) in - self.resource_info.items() if - data.get('required_by_policy')]) + self._mandatory_fields = {field for (field, data) in + self.resource_info.items() if + data.get('required_by_policy')} if 
'tenant_id' in self._mandatory_fields: # ensure that project_id is queried in the database when # tenant_id is required @@ -165,11 +165,11 @@ class NeutronPecanController(object): self._parent_id_name = ('%s_id' % self.parent if self.parent else None) self._plugin_handlers = { - self.LIST: 'get%s_%s' % (parent_resource, self.collection), - self.SHOW: 'get%s_%s' % (parent_resource, self.resource) + self.LIST: 'get{}_{}'.format(parent_resource, self.collection), + self.SHOW: 'get{}_{}'.format(parent_resource, self.resource) } for action in [self.CREATE, self.UPDATE, self.DELETE]: - self._plugin_handlers[action] = '%s%s_%s' % ( + self._plugin_handlers[action] = '{}{}_{}'.format( action, parent_resource, self.resource) self.item = item self.action_status = action_status or {} @@ -267,7 +267,7 @@ class NeutronPecanController(object): return getattr(self.plugin, self._plugin_handlers[self.UPDATE]) -class ShimRequest(object): +class ShimRequest: def __init__(self, context): self.context = context @@ -285,7 +285,7 @@ class ShimItemController(NeutronPecanController): def __init__(self, collection, resource, item, controller, collection_actions=None, member_actions=None, action_status=None): - super(ShimItemController, self).__init__( + super().__init__( collection, resource, collection_actions=collection_actions, member_actions=member_actions, item=item, action_status=action_status) @@ -355,7 +355,7 @@ class ShimCollectionsController(NeutronPecanController): collection_actions=None, member_actions=None, collection_methods=None, action_status=None): collection_methods = collection_methods or {} - super(ShimCollectionsController, self).__init__( + super().__init__( collection, resource, member_actions=member_actions, collection_actions=collection_actions, action_status=action_status) @@ -423,7 +423,7 @@ class ShimMemberActionController(NeutronPecanController): def __init__(self, collection, resource, item, controller, member_actions): - super(ShimMemberActionController, self).__init__( + super().__init__( collection, resource, member_actions=member_actions, item=item) self.controller = controller self.inverted_member_actions = invert_dict(self._member_actions) @@ -438,7 +438,7 @@ class ShimMemberActionController(NeutronPecanController): return method(shim_request, self.item, **uri_identifiers) -class PecanResourceExtension(object): +class PecanResourceExtension: def __init__(self, collection, controller, plugin): self.collection = collection diff --git a/neutron/pecan_wsgi/hooks/notifier.py b/neutron/pecan_wsgi/hooks/notifier.py index 1b4334cf2d2..030fda8e054 100644 --- a/neutron/pecan_wsgi/hooks/notifier.py +++ b/neutron/pecan_wsgi/hooks/notifier.py @@ -43,7 +43,7 @@ class NotifierHook(hooks.PecanHook): if utils.is_member_action(utils.get_controller(state)): return action = pecan_constants.ACTION_MAP.get(state.request.method) - event = '%s.%s.start' % (resource, action) + event = '{}.{}.start'.format(resource, action) if action in ('create', 'update'): # notifier just gets plain old body without any treatment other # than the population of the object ID being operated on @@ -96,7 +96,7 @@ class NotifierHook(hooks.PecanHook): else: result = state.response.json - notifier_method = '%s.%s.end' % (resource_name, action) + notifier_method = '{}.{}.end'.format(resource_name, action) notifier_action = utils.get_controller(state).plugin_handlers[action] registry.publish(resource_name, events.BEFORE_RESPONSE, self, payload=events.APIEventPayload( diff --git a/neutron/pecan_wsgi/hooks/policy_enforcement.py 
b/neutron/pecan_wsgi/hooks/policy_enforcement.py index fd9ffd71a00..46db98c36d9 100644 --- a/neutron/pecan_wsgi/hooks/policy_enforcement.py +++ b/neutron/pecan_wsgi/hooks/policy_enforcement.py @@ -237,7 +237,7 @@ class PolicyHook(hooks.PecanHook): context, # NOTE(kevinbenton): this used to reference a # _plugin_handlers dict, why? - 'get_%s:%s' % (resource, attr_name), + 'get_{}:{}'.format(resource, attr_name), data, might_not_exist=True, pluralized=collection): diff --git a/neutron/plugins/ml2/common/exceptions.py b/neutron/plugins/ml2/common/exceptions.py index 106d4275fa5..4837947cb02 100644 --- a/neutron/plugins/ml2/common/exceptions.py +++ b/neutron/plugins/ml2/common/exceptions.py @@ -28,7 +28,7 @@ class MechanismDriverError(exceptions.MultipleExceptions): # MultipleExceptions and return inner exceptions. Keep it # for backward-compatibility, in case other code use it. self.message = _("%s failed.") % method - super(MechanismDriverError, self).__init__(errors or []) + super().__init__(errors or []) class ExtensionDriverError(exceptions.InvalidInput): diff --git a/neutron/plugins/ml2/db.py b/neutron/plugins/ml2/db.py index 2d29a3eb3a6..0a16ed0cc81 100644 --- a/neutron/plugins/ml2/db.py +++ b/neutron/plugins/ml2/db.py @@ -178,8 +178,8 @@ def get_sg_ids_grouped_by_port(context, port_ids): with db_api.CONTEXT_READER.using(context): # partial UUIDs must be individually matched with startswith. # full UUIDs may be matched directly in an IN statement - partial_uuids = set(port_id for port_id in port_ids - if not uuidutils.is_uuid_like(port_id)) + partial_uuids = {port_id for port_id in port_ids + if not uuidutils.is_uuid_like(port_id)} full_uuids = set(port_ids) - partial_uuids or_criteria = [models_v2.Port.id.startswith(port_id) for port_id in partial_uuids] diff --git a/neutron/plugins/ml2/driver_context.py b/neutron/plugins/ml2/driver_context.py index 9942068fe03..d5115596531 100644 --- a/neutron/plugins/ml2/driver_context.py +++ b/neutron/plugins/ml2/driver_context.py @@ -25,7 +25,7 @@ from neutron.db import segments_db LOG = log.getLogger(__name__) -class InstanceSnapshot(object): +class InstanceSnapshot: """Used to avoid holding references to DB objects in PortContext.""" def __init__(self, obj): self._model_class = obj.__class__ @@ -56,7 +56,7 @@ class InstanceSnapshot(object): return getattr(self, item) -class MechanismDriverContext(object): +class MechanismDriverContext: """MechanismDriver context base class.""" def __init__(self, plugin, plugin_context): self._plugin = plugin @@ -74,7 +74,7 @@ class NetworkContext(MechanismDriverContext, api.NetworkContext): def __init__(self, plugin, plugin_context, network, original_network=None, segments=None): - super(NetworkContext, self).__init__(plugin, plugin_context) + super().__init__(plugin, plugin_context) self._network = network self._original_network = original_network self._segments = segments_db.get_network_segments( @@ -97,7 +97,7 @@ class SubnetContext(MechanismDriverContext, api.SubnetContext): def __init__(self, plugin, plugin_context, subnet, network, original_subnet=None): - super(SubnetContext, self).__init__(plugin, plugin_context) + super().__init__(plugin, plugin_context) self._subnet = subnet self._original_subnet = original_subnet self._network_context = NetworkContext(plugin, plugin_context, @@ -125,7 +125,7 @@ class PortContext(MechanismDriverContext, api.PortContext): def __init__(self, plugin, plugin_context, port, network, binding, binding_levels, original_port=None): - super(PortContext, self).__init__(plugin, 
plugin_context) + super().__init__(plugin, plugin_context) self._port = port self._original_port = original_port if isinstance(network, NetworkContext): diff --git a/neutron/plugins/ml2/drivers/agent/_agent_manager_base.py b/neutron/plugins/ml2/drivers/agent/_agent_manager_base.py index 7a312ba5780..6d70898ed2a 100644 --- a/neutron/plugins/ml2/drivers/agent/_agent_manager_base.py +++ b/neutron/plugins/ml2/drivers/agent/_agent_manager_base.py @@ -17,7 +17,7 @@ import abc -class NetworkSegment(object): +class NetworkSegment: """Represents a Neutron network segment""" def __init__(self, network_type, physical_network, segmentation_id, mtu=None): @@ -27,7 +27,7 @@ class NetworkSegment(object): self.mtu = mtu -class CommonAgentManagerRpcCallBackBase(object, metaclass=abc.ABCMeta): +class CommonAgentManagerRpcCallBackBase(metaclass=abc.ABCMeta): """Base class for managers RPC callbacks. This class must be inherited by a RPC callback class that is used @@ -78,7 +78,7 @@ class CommonAgentManagerRpcCallBackBase(object, metaclass=abc.ABCMeta): return updated_devices -class CommonAgentManagerBase(object, metaclass=abc.ABCMeta): +class CommonAgentManagerBase(metaclass=abc.ABCMeta): """Base class for managers that are used with the common agent loop. This class must be inherited by a manager class that is used diff --git a/neutron/plugins/ml2/drivers/agent/_common_agent.py b/neutron/plugins/ml2/drivers/agent/_common_agent.py index 0caf1c98b92..3d31e11a9e1 100644 --- a/neutron/plugins/ml2/drivers/agent/_common_agent.py +++ b/neutron/plugins/ml2/drivers/agent/_common_agent.py @@ -60,7 +60,7 @@ class CommonAgentLoop(service.Service): :param agent_type: Specifies the type of the agent :param agent_binary: The agent binary string """ - super(CommonAgentLoop, self).__init__() + super().__init__() self.mgr = manager self._validate_manager_class() self.polling_interval = polling_interval @@ -119,7 +119,7 @@ class CommonAgentLoop(service.Service): self.set_rpc_timeout(self.quitting_rpc_timeout) if self.connection: self.connection.close() - super(CommonAgentLoop, self).stop(graceful) + super().stop(graceful) def reset(self): common_config.setup_logging() diff --git a/neutron/plugins/ml2/drivers/helpers.py b/neutron/plugins/ml2/drivers/helpers.py index 91b0b1ea1f3..cb789ac2d1a 100644 --- a/neutron/plugins/ml2/drivers/helpers.py +++ b/neutron/plugins/ml2/drivers/helpers.py @@ -57,7 +57,7 @@ class SegmentTypeDriver(BaseTypeDriver): """ def __init__(self, model): - super(SegmentTypeDriver, self).__init__() + super().__init__() self.model = model.db_model self.segmentation_obj = model primary_keys_columns = self.model.__table__.primary_key.columns @@ -147,7 +147,7 @@ class SegmentTypeDriver(BaseTypeDriver): if not isinstance(allocations, list): allocations = [allocations] if allocations else [] for alloc in allocations: - segment = dict((k, alloc[k]) for k in self.primary_keys) + segment = {k: alloc[k] for k in self.primary_keys} try_to_allocate = True if self.segmentation_obj.allocate(context, **segment): LOG.debug('%(type)s segment allocate from pool success ' diff --git a/neutron/plugins/ml2/drivers/l2pop/mech_driver.py b/neutron/plugins/ml2/drivers/l2pop/mech_driver.py index 7f6c3d09823..f6e96bb0ab3 100644 --- a/neutron/plugins/ml2/drivers/l2pop/mech_driver.py +++ b/neutron/plugins/ml2/drivers/l2pop/mech_driver.py @@ -32,7 +32,7 @@ LOG = logging.getLogger(__name__) class L2populationMechanismDriver(api.MechanismDriver): def __init__(self): - super(L2populationMechanismDriver, self).__init__() + super().__init__() 
self.L2populationAgentNotify = l2pop_rpc.L2populationAgentNotifyAPI() def initialize(self): @@ -95,8 +95,8 @@ class L2populationMechanismDriver(api.MechanismDriver): return set() def _get_diff_ips(self, orig, port): - orig_ips = set([ip['ip_address'] for ip in orig['fixed_ips']]) - port_ips = set([ip['ip_address'] for ip in port['fixed_ips']]) + orig_ips = {ip['ip_address'] for ip in orig['fixed_ips']} + port_ips = {ip['ip_address'] for ip in port['fixed_ips']} # check if an ip has been added or removed orig_chg_ips = orig_ips.difference(port_ips) diff --git a/neutron/plugins/ml2/drivers/l2pop/rpc.py b/neutron/plugins/ml2/drivers/l2pop/rpc.py index 91eed7118c5..4489f4fb87e 100644 --- a/neutron/plugins/ml2/drivers/l2pop/rpc.py +++ b/neutron/plugins/ml2/drivers/l2pop/rpc.py @@ -27,7 +27,7 @@ LOG = logging.getLogger(__name__) PortInfo = collections.namedtuple("PortInfo", "mac_address ip_address") -class L2populationAgentNotifyAPI(object): +class L2populationAgentNotifyAPI: def __init__(self, topic=topics.AGENT): self.topic = topic diff --git a/neutron/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc.py b/neutron/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc.py index c14dfee0c22..74c3c42b71a 100644 --- a/neutron/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc.py +++ b/neutron/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc.py @@ -24,7 +24,7 @@ from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc from neutron.plugins.ml2.drivers.openvswitch.agent import vlanmanager -class L2populationRpcCallBackMixin(object, metaclass=abc.ABCMeta): +class L2populationRpcCallBackMixin(metaclass=abc.ABCMeta): '''General mixin class of L2-population RPC call back. The following methods are called through RPC. @@ -84,10 +84,10 @@ class L2populationRpcCallBackMixin(object, metaclass=abc.ABCMeta): for pi in agent[when]] for value in unmarshalled.values(): if 'ports' in value: - value['ports'] = dict( - (address, [l2pop_rpc.PortInfo(*pi) for pi in port_infos]) + value['ports'] = { + address: [l2pop_rpc.PortInfo(*pi) for pi in port_infos] for address, port_infos in value['ports'].items() - ) + } return unmarshalled @abc.abstractmethod diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py index 0c1b13be195..96fe13c6945 100644 --- a/neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py +++ b/neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py @@ -64,7 +64,7 @@ def _setup_arp_spoofing_protection(vif, port_details): def chain_name(vif): # start each chain with a common identifier for cleanup to find - return '%s%s' % (SPOOF_CHAIN_PREFIX, vif) + return '{}{}'.format(SPOOF_CHAIN_PREFIX, vif) @lockutils.synchronized('ebtables') @@ -194,7 +194,7 @@ def _mac_vif_jump_present(vif, current_rules): def _mac_chain_name(vif): - return '%s%s' % (MAC_CHAIN_PREFIX, vif) + return '{}{}'.format(MAC_CHAIN_PREFIX, vif) def _delete_vif_mac_rules(vif, current_rules): diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/qos_driver.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/qos_driver.py index 2aefa30f953..98e1e34d2db 100644 --- a/neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/qos_driver.py +++ b/neutron/plugins/ml2/drivers/linuxbridge/agent/extension_drivers/qos_driver.py @@ -41,7 +41,7 @@ class QosLinuxbridgeAgentDriver(qos.QosLinuxAgentDriver): const.EGRESS_DIRECTION: "o"} def __init__(self): - super(QosLinuxbridgeAgentDriver, 
self).__init__() + super().__init__() self.iptables_manager = None self.agent_api = None self.tbf_latency = cfg.CONF.QOS.tbf_latency @@ -63,8 +63,8 @@ class QosLinuxbridgeAgentDriver(qos.QosLinuxAgentDriver): def _dscp_chain_name(self, direction, device): return iptables_manager.get_chain_name( - "qos-%s%s" % (self.IPTABLES_DIRECTION_PREFIX[direction], - device[3:])) + "qos-{}{}".format(self.IPTABLES_DIRECTION_PREFIX[direction], + device[3:])) def _dscp_rule(self, direction, device): return ('-m physdev --%s %s --physdev-is-bridged ' diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_agent_extension_api.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_agent_extension_api.py index 720552d09f3..23b9082ba30 100644 --- a/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_agent_extension_api.py +++ b/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_agent_extension_api.py @@ -14,7 +14,7 @@ # under the License. -class LinuxbridgeAgentExtensionAPI(object): +class LinuxbridgeAgentExtensionAPI: '''Implements the Agent API for L2 agent. Extensions can gain access to this API by overriding the consume_api @@ -22,7 +22,7 @@ class LinuxbridgeAgentExtensionAPI(object): ''' def __init__(self, iptables_manager): - super(LinuxbridgeAgentExtensionAPI, self).__init__() + super().__init__() self.iptables_manager = iptables_manager def get_iptables_manager(self): diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py index 3e049bd553f..65fd74143cb 100644 --- a/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py @@ -74,7 +74,7 @@ IPTABLES_DRIVERS = [ class LinuxBridgeManager(amb.CommonAgentManagerBase): def __init__(self, bridge_mappings, interface_mappings): - super(LinuxBridgeManager, self).__init__() + super().__init__() self.bridge_mappings = bridge_mappings self.interface_mappings = interface_mappings self.validate_interface_mappings() @@ -200,7 +200,7 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase): physical_interface = plugin_utils.get_interface_name( physical_interface, max_len=(constants.DEVICE_NAME_MAX_LEN - MAX_VLAN_POSTFIX_LEN)) - return "%s%s" % (physical_interface, vlan_postfix) + return "{}{}".format(physical_interface, vlan_postfix) @staticmethod def get_tap_device_name(interface_id): diff --git a/neutron/plugins/ml2/drivers/linuxbridge/mech_driver/mech_linuxbridge.py b/neutron/plugins/ml2/drivers/linuxbridge/mech_driver/mech_linuxbridge.py index 12f21c354ad..bfee040601c 100644 --- a/neutron/plugins/ml2/drivers/linuxbridge/mech_driver/mech_linuxbridge.py +++ b/neutron/plugins/ml2/drivers/linuxbridge/mech_driver/mech_linuxbridge.py @@ -40,7 +40,7 @@ class LinuxbridgeMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): vif_details = {portbindings.CAP_PORT_FILTER: sg_enabled, portbindings.VIF_DETAILS_CONNECTIVITY: self.connectivity} - super(LinuxbridgeMechanismDriver, self).__init__( + super().__init__( constants.AGENT_TYPE_LINUXBRIDGE, portbindings.VIF_TYPE_BRIDGE, vif_details) diff --git a/neutron/plugins/ml2/drivers/macvtap/macvtap_common.py b/neutron/plugins/ml2/drivers/macvtap/macvtap_common.py index 8d185d637e7..a32f03bca2d 100644 --- a/neutron/plugins/ml2/drivers/macvtap/macvtap_common.py +++ b/neutron/plugins/ml2/drivers/macvtap/macvtap_common.py @@ -27,4 +27,4 @@ def get_vlan_device_name(src_dev, vlan): # Ensure that 
independent of the vlan len the same name prefix is used. src_dev = plugin_utils.get_interface_name( src_dev, max_len=n_const.DEVICE_NAME_MAX_LEN - MAX_VLAN_POSTFIX_LEN) - return "%s.%s" % (src_dev, vlan) + return "{}.{}".format(src_dev, vlan) diff --git a/neutron/plugins/ml2/drivers/macvtap/mech_driver/mech_macvtap.py b/neutron/plugins/ml2/drivers/macvtap/mech_driver/mech_macvtap.py index e43abab6cae..591cc5f24cd 100644 --- a/neutron/plugins/ml2/drivers/macvtap/mech_driver/mech_macvtap.py +++ b/neutron/plugins/ml2/drivers/macvtap/mech_driver/mech_macvtap.py @@ -41,7 +41,7 @@ class MacvtapMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): vif_details = {portbindings.CAP_PORT_FILTER: False, portbindings.VIF_DETAILS_CONNECTIVITY: self.connectivity} - super(MacvtapMechanismDriver, self).__init__( + super().__init__( constants.AGENT_TYPE_MACVTAP, portbindings.VIF_TYPE_MACVTAP, vif_details) diff --git a/neutron/plugins/ml2/drivers/mech_agent.py b/neutron/plugins/ml2/drivers/mech_agent.py index 515892f3724..faeec8e284f 100644 --- a/neutron/plugins/ml2/drivers/mech_agent.py +++ b/neutron/plugins/ml2/drivers/mech_agent.py @@ -49,7 +49,7 @@ class AgentMechanismDriverBase(api.MechanismDriver, metaclass=abc.ABCMeta): :param agent_type: Constant identifying agent type in agents_db :param supported_vnic_types: The binding:vnic_type values we can bind """ - super(AgentMechanismDriverBase, self).__init__() + super().__init__() self.agent_type = agent_type self.supported_vnic_types = supported_vnic_types @@ -316,7 +316,7 @@ class SimpleAgentMechanismDriverBase(AgentMechanismDriverBase, """ supported_vnic_types = (supported_vnic_types or [portbindings.VNIC_NORMAL]) - super(SimpleAgentMechanismDriverBase, self).__init__( + super().__init__( agent_type, supported_vnic_types) self.supported_vnic_types = self.prohibit_list_supported_vnic_types( self.supported_vnic_types, vnic_type_prohibit_list) diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py index 143b3591e54..3542899b6a8 100644 --- a/neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py +++ b/neutron/plugins/ml2/drivers/mech_sriov/agent/eswitch_manager.py @@ -39,7 +39,7 @@ IP_LINK_CAPABILITY_SPOOFCHK = 'spoofchk' IP_LINK_SUB_CAPABILITY_QOS = 'qos' -class PciOsWrapper(object): +class PciOsWrapper: """OS wrapper for checking virtual functions""" DEVICE_PATH = "/sys/class/net/%s/device" @@ -134,13 +134,13 @@ class PciOsWrapper(object): LOG.debug("Number of VFs configured on device %s: %s", dev_name, numvfs) return numvfs - except IOError: + except OSError: LOG.warning("Error reading sriov_numvfs file for device %s, " "probably not supported by this device", dev_name) return -1 -class EmbSwitch(object): +class EmbSwitch: """Class to manage logical embedded switch entity. 
Embedded Switch object is logical entity representing all VFs @@ -314,13 +314,13 @@ class EmbSwitch(object): return mac -class ESwitchManager(object): +class ESwitchManager: """Manages logical Embedded Switch entities for physical network.""" def __new__(cls): # make it a singleton if not hasattr(cls, '_instance'): - cls._instance = super(ESwitchManager, cls).__new__(cls) + cls._instance = super().__new__(cls) cls.emb_switches_map = {} cls.pci_slot_map = {} cls.skipped_devices = set() diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py index 094fad4b6e2..3de79734894 100644 --- a/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py +++ b/neutron/plugins/ml2/drivers/mech_sriov/agent/extension_drivers/qos_driver.py @@ -27,7 +27,7 @@ class QosSRIOVAgentDriver(qos.QosLinuxAgentDriver): SUPPORTED_RULES = driver.SUPPORTED_RULES def __init__(self): - super(QosSRIOVAgentDriver, self).__init__() + super().__init__() self.eswitch_mgr = None def initialize(self): diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py index 55a43bcf9dc..e06bf9d6149 100644 --- a/neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py +++ b/neutron/plugins/ml2/drivers/mech_sriov/agent/pci_lib.py @@ -33,7 +33,7 @@ class PciDeviceIPWrapper(ip_lib.IPWrapper): """Wrapper class for ip link commands related to virtual functions.""" def __init__(self, dev_name): - super(PciDeviceIPWrapper, self).__init__() + super().__init__() self.dev_name = dev_name def get_assigned_macs(self, vf_list): diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py index d9e82ed1a88..d0033410425 100644 --- a/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py +++ b/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py @@ -69,7 +69,7 @@ class SriovNicSwitchRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin): target = oslo_messaging.Target(version='1.5') def __init__(self, context, agent, sg_agent): - super(SriovNicSwitchRpcCallbacks, self).__init__() + super().__init__() self.context = context self.agent = agent self.sg_agent = sg_agent @@ -154,7 +154,7 @@ class SriovNicSwitchRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin): @profiler.trace_cls("rpc") -class SriovNicSwitchAgent(object): +class SriovNicSwitchAgent: def __init__(self, physical_devices_mappings, exclude_devices, polling_interval, rp_bandwidths, rp_inventory_defaults, rp_hypervisors): @@ -514,7 +514,7 @@ class SriovNicSwitchAgent(object): self.iter_num = self.iter_num + 1 -class SriovNicAgentConfigParser(object): +class SriovNicAgentConfigParser: def __init__(self): self.device_mappings = {} self.exclude_devices = {} diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py b/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py index df7de51913c..bc5ebca1776 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/extension_drivers/qos_driver.py @@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__) MAX_RETIES = 1000 -class MeterIDGenerator(object): +class MeterIDGenerator: # This cache will be: # PORT_METER_ID = {"port_id_1_ingress": 1, # "port_id_1_egress: 2, @@ -41,7 +41,7 @@ class MeterIDGenerator(object): def 
__new__(cls, *args, **kwargs): # make it a singleton if not hasattr(cls, '_instance'): - cls._instance = super(MeterIDGenerator, cls).__new__(cls) + cls._instance = super().__new__(cls) cls.PORT_METER_ID = {} return cls._instance @@ -75,7 +75,7 @@ class MeterIDGenerator(object): self.PORT_METER_ID[key] = meter_id -class MeterRuleManager(object): +class MeterRuleManager: def __init__(self, br_int, type_=comm_consts.METER_FLAG_PPS): self.br_int = br_int @@ -103,7 +103,7 @@ class MeterRuleManager(object): break def get_data_key(self, port_id, direction): - return "%s_%s_%s" % (self.rule_type, port_id, direction) + return "{}_{}_{}".format(self.rule_type, port_id, direction) def load_port_meter_id(self, port_name, port_id, direction): key = self.get_data_key(port_id, direction) @@ -150,7 +150,7 @@ class MeterRuleManager(object): return self.PORT_INFO_EGRESS.pop(port_id, (None, None, None)) -class OVSMeterQoSDriver(object): +class OVSMeterQoSDriver: SUPPORT_METER = None @@ -294,7 +294,7 @@ class QosOVSAgentDriver(qos.QosLinuxAgentDriver, SUPPORTED_RULES = driver.SUPPORTED_RULES def __init__(self): - super(QosOVSAgentDriver, self).__init__() + super().__init__() self.br_int_name = cfg.CONF.OVS.integration_bridge self.br_int = None self.agent_api = None diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/br_cookie.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/br_cookie.py index 25b077dcc13..226c1c9bd9d 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/br_cookie.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/br_cookie.py @@ -18,13 +18,13 @@ import copy from neutron.agent.common import ovs_lib -class OVSBridgeCookieMixin(object): +class OVSBridgeCookieMixin: '''Mixin to provide cookie retention functionality to the OVSAgentBridge ''' def __init__(self, *args, **kwargs): - super(OVSBridgeCookieMixin, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self._reserved_cookies = set() @property @@ -51,7 +51,7 @@ class OVSBridgeCookieMixin(object): self._reserved_cookies.add(val) if self._default_cookie in self._reserved_cookies: self._reserved_cookies.remove(self._default_cookie) - super(OVSBridgeCookieMixin, self).set_agent_uuid_stamp(val) + super().set_agent_uuid_stamp(val) def clone(self): '''Used by OVSCookieBridge, can be overridden by subclasses if a diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_dvr_process.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_dvr_process.py index 46dfc84f0f4..5c03d91217b 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_dvr_process.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/br_dvr_process.py @@ -21,7 +21,7 @@ from os_ken.lib.packet import icmpv6 from os_ken.lib.packet import in_proto -class OVSDVRInterfaceMixin(object): +class OVSDVRInterfaceMixin: def delete_arp_destination_change(self, target_mac_address, orig_mac_address): @@ -60,7 +60,7 @@ class OVSDVRInterfaceMixin(object): instructions=instructions) -class OVSDVRProcessMixin(object): +class OVSDVRProcessMixin: """Common logic for br-tun and br-phys' DVR_PROCESS tables. 
Inheriters should provide self.dvr_process_table_id and diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py index 610aca37385..06e2f271a36 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ofswitch.py @@ -42,7 +42,7 @@ class ActiveBundleRunning(exceptions.NeutronException): message = _("Another active bundle 0x%(bundle_id)x is running") -class OpenFlowSwitchMixin(object): +class OpenFlowSwitchMixin: """Mixin to provide common convenient routines for an openflow switch. NOTE(yamamoto): super() points to ovs_lib.OVSBridge. @@ -59,7 +59,7 @@ class OpenFlowSwitchMixin(object): def __init__(self, *args, **kwargs): self._app = kwargs.pop('os_ken_app') self.active_bundles = set() - super(OpenFlowSwitchMixin, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def _get_dp_by_dpid(self, dpid_int): """Get os-ken datapath object for the switch.""" @@ -175,7 +175,7 @@ class OpenFlowSwitchMixin(object): return flows def _dump_and_clean(self, table_id=None): - cookies = set([f.cookie for f in self.dump_flows(table_id)]) - \ + cookies = {f.cookie for f in self.dump_flows(table_id)} - \ self.reserved_cookies for c in cookies: LOG.warning("Deleting flow with cookie 0x%(cookie)x", @@ -253,7 +253,7 @@ class OpenFlowSwitchMixin(object): return BundledOpenFlowBridge(self, atomic, ordered) -class BundledOpenFlowBridge(object): +class BundledOpenFlowBridge: def __init__(self, br, atomic, ordered): self.br = br self.active_bundle = None diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_oskenapp.py b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_oskenapp.py index 200a8c9f37d..959089e86f0 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_oskenapp.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/openflow/native/ovs_oskenapp.py @@ -53,7 +53,7 @@ def agent_main_wrapper(bridge_classes): class OVSNeutronAgentOSKenApp(base_oskenapp.BaseNeutronAgentOSKenApp): def start(self): # Start os-ken event loop thread - super(OVSNeutronAgentOSKenApp, self).start() + super().start() def _make_br_cls(br_cls): return functools.partial(br_cls, os_ken_app=self) diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_agent_extension_api.py b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_agent_extension_api.py index aa4c3f4724c..22eab779fd6 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_agent_extension_api.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_agent_extension_api.py @@ -14,7 +14,7 @@ # under the License. -class OVSCookieBridge(object): +class OVSCookieBridge: '''Bridge restricting flow operations to its own distinct cookie This class creates a bridge derived from a bridge passed at init (which @@ -33,7 +33,7 @@ class OVSCookieBridge(object): pass -class OVSAgentExtensionAPI(object): +class OVSAgentExtensionAPI: '''Implements the Agent API for Open vSwitch agent. 
Extensions can gain access to this API by overriding the consume_api @@ -44,7 +44,7 @@ class OVSAgentExtensionAPI(object): plugin_rpc=None, phys_ofports=None, bridge_mappings=None): - super(OVSAgentExtensionAPI, self).__init__() + super().__init__() self.br_int = int_br self.br_tun = tun_br self.br_phys = phys_brs or {} diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py index 971ca122506..b398801aa1d 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py @@ -34,7 +34,7 @@ LOG = logging.getLogger(__name__) # A class to represent a DVR-hosted subnet including vif_ports resident on # that subnet -class LocalDVRSubnetMapping(object): +class LocalDVRSubnetMapping: def __init__(self, subnet, csnat_ofport=ovs_constants.OFPORT_INVALID): # set of compute ports on this dvr subnet self.compute_ports = {} @@ -87,7 +87,7 @@ class LocalDVRSubnetMapping(object): return self.dvr_ports -class OVSPort(object): +class OVSPort: def __init__(self, id, ofport, mac, device_owner): self.id = id self.mac = mac @@ -136,7 +136,7 @@ class OVSPort(object): @profiler.trace_cls("ovs_dvr_agent") -class OVSDVRNeutronAgent(object): +class OVSDVRNeutronAgent: '''Implements OVS-based DVR (Distributed Virtual Router) agent''' # history # 1.0 Initial version diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py index fbec145e848..6af1b9d87a1 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py @@ -104,7 +104,7 @@ class PortInfo(collections.UserDict): 'removed': removed or set(), 'updated': updated or set(), 're_added': re_added or set()} - super(PortInfo, self).__init__(_dict) + super().__init__(_dict) def has_zero_prefixlen_address(ip_addresses): @@ -156,7 +156,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, :param bridge_classes: a dict for bridge classes. :param conf: an instance of ConfigOpts ''' - super(OVSNeutronAgent, self).__init__() + super().__init__() self.conf = conf or cfg.CONF self.ovs = ovs_lib.BaseOVS() self.ext_manager = ext_manager @@ -485,7 +485,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, segmentation_id = local_vlan_map.get('segmentation_id') if net_uuid: # TODO(sahid): This key thing should be normalized. - key = "%s/%s" % (net_uuid, segmentation_id) + key = "{}/{}".format(net_uuid, segmentation_id) if (key not in self._local_vlan_hints and local_vlan != ovs_const.DEAD_VLAN_TAG): self.available_local_vlans.remove(local_vlan) @@ -1011,7 +1011,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, except vlanmanager.MappingNotFound: # TODO(sahid): This local_vlan_hints should have its own # datastructure and model to be manipulated. 
- key = "%s/%s" % (net_uuid, segmentation_id) + key = "{}/{}".format(net_uuid, segmentation_id) lvid = self._local_vlan_hints.pop(key, None) if lvid is None: if not self.available_local_vlans: @@ -1842,11 +1842,11 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, old_ports_not_ready_attrs = self.int_br.get_ports_attributes( 'Interface', columns=['name', 'external_ids', 'ofport'], ports=old_ports_not_ready, if_exists=True) - now_ready_ports = set( - [p['name'] for p in old_ports_not_ready_attrs]) + now_ready_ports = { + p['name'] for p in old_ports_not_ready_attrs} LOG.debug("Ports %s are now ready", now_ready_ports) old_ports_not_ready_yet = old_ports_not_ready - now_ready_ports - removed_ports = set([p['name'] for p in events['removed']]) + removed_ports = {p['name'] for p in events['removed']} old_ports_not_ready_yet -= removed_ports LOG.debug("Ports %s were not ready at last iteration and are not " "ready yet", old_ports_not_ready_yet) @@ -2369,7 +2369,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, priority=12, dl_vlan=lvm.vlan, dl_dst=port_detail['mac_address'], - actions='strip_vlan,output:{:d}'.format(port.ofport)) + actions=f'strip_vlan,output:{port.ofport:d}') # For packets from internal ports or VM ports. br_int.add_flow( @@ -2377,7 +2377,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, priority=12, reg6=lvm.vlan, dl_dst=port_detail['mac_address'], - actions='output:{:d}'.format(port.ofport)) + actions=f'output:{port.ofport:d}') patch_ofport = None if lvm.network_type in ( @@ -2509,7 +2509,7 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin, remote_tunnel_hash = cls.get_tunnel_hash(remote_ip, hashlen) if not remote_tunnel_hash: return None - return '%s-%s' % (network_type, remote_tunnel_hash) + return '{}-{}'.format(network_type, remote_tunnel_hash) def _agent_has_updates(self, polling_manager): return (polling_manager.is_polling_required or diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/vlanmanager.py b/neutron/plugins/ml2/drivers/openvswitch/agent/vlanmanager.py index 2ae2cf6f3b0..103a5e87c99 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/vlanmanager.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/vlanmanager.py @@ -39,7 +39,7 @@ class NotUniqMapping(exceptions.NeutronException): message = _('Mapping VLAN for network %(net_id)s should be unique.') -class LocalVLANMapping(object): +class LocalVLANMapping: def __init__(self, vlan, network_type, physical_network, segmentation_id, vif_ports=None): self.vlan = vlan @@ -67,14 +67,14 @@ class LocalVLANMapping(object): return id(self) -class LocalVlanManager(object): +class LocalVlanManager: """Singleton manager that maps internal VLAN mapping to external network segmentation ids. 
""" def __new__(cls): if not hasattr(cls, '_instance'): - cls._instance = super(LocalVlanManager, cls).__new__(cls) + cls._instance = super().__new__(cls) return cls._instance def __init__(self): diff --git a/neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py b/neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py index 1dc9697e4a5..afd1dc52899 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py +++ b/neutron/plugins/ml2/drivers/openvswitch/mech_driver/mech_openvswitch.py @@ -69,7 +69,7 @@ class OpenvswitchMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): portbindings.VNIC_VHOST_VDPA, ] prohibit_list = cfg.CONF.OVS_DRIVER.vnic_type_prohibit_list - super(OpenvswitchMechanismDriver, self).__init__( + super().__init__( constants.AGENT_TYPE_OVS, portbindings.VIF_TYPE_OVS, vif_details, @@ -130,7 +130,7 @@ class OpenvswitchMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): LOG.debug("Refusing to bind due to unsupported vnic_type: %s with " "no switchdev capability", portbindings.VNIC_DIRECT) return - super(OpenvswitchMechanismDriver, self).bind_port(context) + super().bind_port(context) def get_supported_vif_type(self, agent): caps = agent['configurations'].get('ovs_capabilities', {}) diff --git a/neutron/plugins/ml2/drivers/ovn/agent/neutron_agent.py b/neutron/plugins/ml2/drivers/ovn/agent/neutron_agent.py index 00389e19af4..14d02ee82aa 100644 --- a/neutron/plugins/ml2/drivers/ovn/agent/neutron_agent.py +++ b/neutron/plugins/ml2/drivers/ovn/agent/neutron_agent.py @@ -25,7 +25,7 @@ from neutron.common.ovn import utils as ovn_utils from neutron.common import utils -class DeletedChassis(object): +class DeletedChassis: external_ids = {} hostname = '("Chassis" register deleted)' name = '("Chassis" register deleted)' @@ -249,7 +249,7 @@ class OVNNeutronAgent(NeutronAgent): @utils.SingletonDecorator -class AgentCache(object): +class AgentCache: def __init__(self, driver=None): # This is just to make pylint happy because it doesn't like calls to # AgentCache() with no arguments, despite init only being called the diff --git a/neutron/plugins/ml2/drivers/ovn/db_migration.py b/neutron/plugins/ml2/drivers/ovn/db_migration.py index 3b3ba5ee6c6..b0977acd55f 100644 --- a/neutron/plugins/ml2/drivers/ovn/db_migration.py +++ b/neutron/plugins/ml2/drivers/ovn/db_migration.py @@ -59,8 +59,8 @@ def migrate_neutron_database_to_ovn(): segment.segmentation_id).update({"allocated": False}) # Update ``PortBinding`` objects. - pb_updated = set([]) - pb_missed = set([]) + pb_updated = set() + pb_missed = set() while True: pb_current = port_obj.PortBinding.get_port_id_and_host( ctx, vif_type='ovs', vnic_type='normal', status='ACTIVE') @@ -104,7 +104,7 @@ def migrate_neutron_database_to_ovn(): 'records: %s', ', '.join(pb_missed)) # Update ``Trunk`` objects. - trunk_updated = set([]) + trunk_updated = set() while True: trunk_current = trunk_obj.Trunk.get_trunk_ids(ctx) diff = set(trunk_current).difference(trunk_updated) diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py index 87ac21454c4..4d0983e92e9 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py @@ -866,11 +866,11 @@ class OVNMechanismDriver(api.MechanismDriver): # the port status from DOWN to UP in order to generate 'fake' # vif-interface-plugged event. 
This workaround is needed to # perform live-migration with live_migration_wait_for_vif_plug=True. - if ((port['status'] == const.PORT_STATUS_DOWN and - ovn_const.MIGRATING_ATTR in port[portbindings.PROFILE].keys() and - port[portbindings.VIF_TYPE] in ( + if (port['status'] == const.PORT_STATUS_DOWN and + ovn_const.MIGRATING_ATTR in port[portbindings.PROFILE].keys() and + port[portbindings.VIF_TYPE] in ( portbindings.VIF_TYPE_OVS, - portbindings.VIF_TYPE_VHOST_USER))): + portbindings.VIF_TYPE_VHOST_USER)): LOG.info("Setting port %s status from DOWN to UP in order " "to emit vif-interface-plugged event.", port['id']) diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/commands.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/commands.py index 3d2872736b2..0c202296c10 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/commands.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/commands.py @@ -78,7 +78,7 @@ def _add_gateway_chassis(api, txn, lrp_name, val): prio = len(val) uuid_list = [] for chassis in val: - gwc_name = '%s_%s' % (lrp_name, chassis) + gwc_name = '{}_{}'.format(lrp_name, chassis) try: gwc = idlutils.row_by_value(api.idl, 'Gateway_Chassis', @@ -103,7 +103,7 @@ def _add_gateway_chassis(api, txn, lrp_name, val): class CheckLivenessCommand(command.BaseCommand): def __init__(self, api): - super(CheckLivenessCommand, self).__init__(api) + super().__init__(api) def run_idl(self, txn): # txn.pre_commit responsible for updating nb_global.nb_cfg, but @@ -116,7 +116,7 @@ class CheckLivenessCommand(command.BaseCommand): class AddLSwitchPortCommand(command.BaseCommand): def __init__(self, api, lport, lswitch, may_exist, **columns): - super(AddLSwitchPortCommand, self).__init__(api) + super().__init__(api) self.lport = lport self.lswitch = lswitch self.may_exist = may_exist @@ -170,7 +170,7 @@ class AddLSwitchPortCommand(command.BaseCommand): class SetLSwitchPortCommand(command.BaseCommand): def __init__(self, api, lport, external_ids_update, if_exists, **columns): - super(SetLSwitchPortCommand, self).__init__(api) + super().__init__(api) self.lport = lport self.external_ids_update = external_ids_update self.columns = columns @@ -272,7 +272,7 @@ class UpdateLSwitchPortQosOptionsCommand(command.BaseCommand): class DelLSwitchPortCommand(command.BaseCommand): def __init__(self, api, lport, lswitch, if_exists): - super(DelLSwitchPortCommand, self).__init__(api) + super().__init__(api) self.lport = lport self.lswitch = lswitch self.if_exists = if_exists @@ -301,7 +301,7 @@ class DelLSwitchPortCommand(command.BaseCommand): class UpdateLRouterCommand(command.BaseCommand): def __init__(self, api, name, if_exists, **columns): - super(UpdateLRouterCommand, self).__init__(api) + super().__init__(api) self.name = name self.columns = columns self.if_exists = if_exists @@ -431,7 +431,7 @@ class LrDelCommand(ovn_nb_commands.LrDelCommand): class AddLRouterPortCommand(command.BaseCommand): def __init__(self, api, name, lrouter, may_exist, **columns): - super(AddLRouterPortCommand, self).__init__(api) + super().__init__(api) self.name = name self.lrouter = lrouter self.may_exist = may_exist @@ -470,7 +470,7 @@ class AddLRouterPortCommand(command.BaseCommand): class UpdateLRouterPortCommand(command.BaseCommand): def __init__(self, api, name, if_exists, **columns): - super(UpdateLRouterPortCommand, self).__init__(api) + super().__init__(api) self.name = name self.columns = columns self.if_exists = if_exists @@ -495,7 +495,7 @@ class 
UpdateLRouterPortCommand(command.BaseCommand): class DelLRouterPortCommand(command.BaseCommand): def __init__(self, api, name, lrouter, if_exists): - super(DelLRouterPortCommand, self).__init__(api) + super().__init__(api) self.name = name self.lrouter = lrouter self.if_exists = if_exists @@ -524,7 +524,7 @@ class DelLRouterPortCommand(command.BaseCommand): class SetLRouterPortInLSwitchPortCommand(command.BaseCommand): def __init__(self, api, lswitch_port, lrouter_port, is_gw_port, if_exists, lsp_address): - super(SetLRouterPortInLSwitchPortCommand, self).__init__(api) + super().__init__(api) self.lswitch_port = lswitch_port self.lrouter_port = lrouter_port self.is_gw_port = is_gw_port @@ -587,7 +587,7 @@ class SetLRouterMacAgeLimitCommand(command.BaseCommand): class AddACLCommand(command.BaseCommand): def __init__(self, api, lswitch, lport, **columns): - super(AddACLCommand, self).__init__(api) + super().__init__(api) self.lswitch = lswitch self.lport = lport self.columns = columns @@ -608,7 +608,7 @@ class AddACLCommand(command.BaseCommand): class DelACLCommand(command.BaseCommand): def __init__(self, api, lswitch, lport, if_exists): - super(DelACLCommand, self).__init__(api) + super().__init__(api) self.lswitch = lswitch self.lport = lport self.if_exists = if_exists @@ -636,7 +636,7 @@ class DelACLCommand(command.BaseCommand): class AddStaticRouteCommand(command.BaseCommand): def __init__(self, api, lrouter, maintain_bfd=False, **columns): - super(AddStaticRouteCommand, self).__init__(api) + super().__init__(api) self.lrouter = lrouter self.maintain_bfd = maintain_bfd self.columns = columns @@ -677,7 +677,7 @@ class AddStaticRouteCommand(command.BaseCommand): class DelStaticRoutesCommand(command.BaseCommand): def __init__(self, api, lrouter, routes, if_exists): - super(DelStaticRoutesCommand, self).__init__(api) + super().__init__(api) self.lrouter = lrouter self.routes = routes self.if_exists = if_exists @@ -726,7 +726,7 @@ class UpdateObjectExtIdsCommand(command.BaseCommand): field = 'name' def __init__(self, api, record, external_ids, if_exists): - super(UpdateObjectExtIdsCommand, self).__init__(api) + super().__init__(api) self.record = record self.external_ids = external_ids self.if_exists = if_exists @@ -763,7 +763,7 @@ class UpdateLbExternalIds(UpdateObjectExtIdsCommand): class AddDHCPOptionsCommand(command.BaseCommand): def __init__(self, api, subnet_id, port_id=None, may_exist=True, **columns): - super(AddDHCPOptionsCommand, self).__init__(api) + super().__init__(api) self.columns = columns self.may_exist = may_exist self.subnet_id = subnet_id @@ -799,7 +799,7 @@ class AddDHCPOptionsCommand(command.BaseCommand): class DelDHCPOptionsCommand(command.BaseCommand): def __init__(self, api, row_uuid, if_exists=True): - super(DelDHCPOptionsCommand, self).__init__(api) + super().__init__(api) self.if_exists = if_exists self.row_uuid = row_uuid @@ -816,7 +816,7 @@ class DelDHCPOptionsCommand(command.BaseCommand): class AddNATRuleInLRouterCommand(command.BaseCommand): # TODO(chandrav): Add unit tests, bug #1638715. def __init__(self, api, lrouter, **columns): - super(AddNATRuleInLRouterCommand, self).__init__(api) + super().__init__(api) self.lrouter = lrouter self.columns = columns @@ -838,7 +838,7 @@ class DeleteNATRuleInLRouterCommand(command.BaseCommand): # TODO(chandrav): Add unit tests, bug #1638715. 
def __init__(self, api, lrouter, type, logical_ip, external_ip, if_exists): - super(DeleteNATRuleInLRouterCommand, self).__init__(api) + super().__init__(api) self.lrouter = lrouter self.type = type self.logical_ip = logical_ip @@ -866,7 +866,7 @@ class DeleteNATRuleInLRouterCommand(command.BaseCommand): class SetNATRuleInLRouterCommand(command.BaseCommand): def __init__(self, api, lrouter, nat_rule_uuid, **columns): - super(SetNATRuleInLRouterCommand, self).__init__(api) + super().__init__(api) self.lrouter = lrouter self.nat_rule_uuid = nat_rule_uuid self.columns = columns @@ -889,7 +889,7 @@ class SetNATRuleInLRouterCommand(command.BaseCommand): class CheckRevisionNumberCommand(command.BaseCommand): def __init__(self, api, name, resource, resource_type, if_exists): - super(CheckRevisionNumberCommand, self).__init__(api) + super().__init__(api) self.name = name self.resource = resource self.resource_type = resource_type @@ -966,7 +966,7 @@ class CheckRevisionNumberCommand(command.BaseCommand): class DeleteLRouterExtGwCommand(command.BaseCommand): def __init__(self, api, lrouter, if_exists, maintain_bfd=True): - super(DeleteLRouterExtGwCommand, self).__init__(api) + super().__init__(api) self.lrouter = lrouter self.if_exists = if_exists self.maintain_bfd = maintain_bfd @@ -1019,7 +1019,7 @@ class DeleteLRouterExtGwCommand(command.BaseCommand): class SetLSwitchPortToVirtualTypeCommand(command.BaseCommand): def __init__(self, api, lport, vip, parent, if_exists): - super(SetLSwitchPortToVirtualTypeCommand, self).__init__(api) + super().__init__(api) self.lport = lport self.vip = vip self.parent = parent @@ -1051,7 +1051,7 @@ class SetLSwitchPortToVirtualTypeCommand(command.BaseCommand): class UnsetLSwitchPortToVirtualTypeCommand(command.BaseCommand): def __init__(self, api, lport, parent, if_exists): - super(UnsetLSwitchPortToVirtualTypeCommand, self).__init__(api) + super().__init__(api) self.lport = lport self.parent = parent self.if_exists = if_exists diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/placement.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/placement.py index b0303817c2e..03f3c7d0f56 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/placement.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/placement.py @@ -154,7 +154,7 @@ class ChassisBandwidthConfigEvent(row_event.RowEvent): @common_utils.SingletonDecorator -class OVNClientPlacementExtension(object): +class OVNClientPlacementExtension: """OVN client Placement API extension""" def __init__(self, driver): @@ -225,8 +225,9 @@ class OVNClientPlacementExtension(object): chassis = self.get_chassis_config() for state in chassis.values(): _send_deferred_batch(state) - msg = ', '.join(['Chassis %s: %s' % (name, dict_chassis_config(state)) - for (name, state) in chassis.items()]) or '(no info)' + msg = ', '.join(['Chassis {}: {}'.format( + name, dict_chassis_config(state)) + for (name, state) in chassis.items()]) or '(no info)' LOG.debug('OVN chassis Placement initial configuration: %s', msg) return chassis diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/qos.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/qos.py index 1b663c5a64b..00b26479b79 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/qos.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/qos.py @@ -34,12 +34,12 @@ OVN_QOS_DEFAULT_RULE_PRIORITY = 2002 _MIN_RATE = ovn_const.LSP_OPTIONS_QOS_MIN_RATE 
-class OVNClientQosExtension(object): +class OVNClientQosExtension: """OVN client QoS extension""" def __init__(self, driver=None, nb_idl=None): LOG.info('Starting OVNClientQosExtension') - super(OVNClientQosExtension, self).__init__() + super().__init__() self._driver = driver self._nb_idl = nb_idl self._plugin_property = None @@ -116,7 +116,7 @@ class OVNClientQosExtension(object): in_or_out = 'outport' src_or_dst = 'dst' - match = '%s == "%s"' % (in_or_out, port_id) + match = '{} == "{}"'.format(in_or_out, port_id) if ip_address and resident_port: match += (' && ip4.%s == %s && is_chassis_resident("%s")' % (src_or_dst, ip_address, resident_port)) @@ -324,9 +324,9 @@ class OVNClientQosExtension(object): def update_network(self, txn, network, original_network, reset=False, qos_rules=None): - updated_port_ids = set([]) - updated_fip_ids = set([]) - updated_router_ids = set([]) + updated_port_ids = set() + updated_fip_ids = set() + updated_router_ids = set() if not reset and not original_network: # If there is no information about the previous QoS policy, do not # make any change. @@ -459,9 +459,9 @@ class OVNClientQosExtension(object): txn.add(self.nb_idl.qos_del(**ovn_rule, if_exists=True)) def update_policy(self, context, policy): - updated_port_ids = set([]) - updated_fip_ids = set([]) - updated_router_ids = set([]) + updated_port_ids = set() + updated_fip_ids = set() + updated_router_ids = set() bound_networks = policy.get_bound_networks() bound_ports = policy.get_bound_ports() bound_fips = policy.get_bound_floatingips() diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py index 0ff6762f7c1..7d1d56e5a58 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/impl_idl_ovn.py @@ -66,7 +66,7 @@ class OvnNbTransaction(idl_trans.Transaction): # NOTE(lucasagomes): The bump_nb_cfg parameter is only used by # the agents health status check self.bump_nb_cfg = kwargs.pop('bump_nb_cfg', False) - super(OvnNbTransaction, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def pre_commit(self, txn): if not self.bump_nb_cfg: @@ -80,7 +80,7 @@ def add_keepalives(fn): error, sock = fn(*args, **kwargs) try: sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) - except socket.error as e: + except OSError as e: sock.close() return socket_util.get_exception_errno(e), None return error, sock @@ -121,7 +121,7 @@ class Backend(ovs_idl.Backend): def __init__(self, connection): self.ovsdb_connection = connection - super(Backend, self).__init__(connection) + super().__init__(connection) def start_connection(self, connection): try: @@ -235,7 +235,7 @@ def get_ovn_idls(driver, trigger): class OvsdbNbOvnIdl(nb_impl_idl.OvnNbApiIdlImpl, Backend): def __init__(self, connection): - super(OvsdbNbOvnIdl, self).__init__(connection) + super().__init__(connection) @n_utils.classproperty def connection_string(cls): @@ -271,7 +271,7 @@ class OvsdbNbOvnIdl(nb_impl_idl.OvnNbApiIdlImpl, Backend): """ revision_mismatch_raise = kwargs.pop('revision_mismatch_raise', False) try: - with super(OvsdbNbOvnIdl, self).transaction(*args, **kwargs) as t: + with super().transaction(*args, **kwargs) as t: yield t except ovn_exc.RevisionConflict as e: LOG.info('Transaction aborted. 
Reason: %s', e) @@ -587,7 +587,7 @@ class OvsdbNbOvnIdl(nb_impl_idl.OvnNbApiIdlImpl, Backend): if not physnet: continue - lrp_name = '%s%s' % (ovn_const.LRP_PREFIX, port) + lrp_name = '{}{}'.format(ovn_const.LRP_PREFIX, port) original_state = self.get_gateway_chassis_binding(lrp_name) az_hints = self.get_gateway_chassis_az_hints(lrp_name) # Filter out chassis that lost physnet, the cms option, @@ -906,7 +906,7 @@ class OvsdbNbOvnIdl(nb_impl_idl.OvnNbApiIdlImpl, Backend): class OvsdbSbOvnIdl(sb_impl_idl.OvnSbApiIdlImpl, Backend): def __init__(self, connection): - super(OvsdbSbOvnIdl, self).__init__(connection) + super().__init__(connection) @n_utils.classproperty def connection_string(cls): diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py index 921c4f6e62b..683205b6082 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py @@ -82,7 +82,7 @@ def has_lock_periodic(*args, periodic_run_limit=0, **kwargs): return wrapper -class MaintenanceThread(object): +class MaintenanceThread: def __init__(self): self._callables = [] @@ -124,7 +124,7 @@ class OVNNBDBReconnectionEvent(row_event.RowEvent): self.version = version table = 'Connection' events = (self.ROW_CREATE,) - super(OVNNBDBReconnectionEvent, self).__init__(events, table, None) + super().__init__(events, table, None) self.event_name = self.__class__.__name__ def run(self, event, row, old): @@ -134,7 +134,7 @@ class OVNNBDBReconnectionEvent(row_event.RowEvent): self.version = curr_version -class SchemaAwarePeriodicsBase(object): +class SchemaAwarePeriodicsBase: def __init__(self, ovn_client): self._nb_idl = ovn_client._nb_idl @@ -175,7 +175,7 @@ class DBInconsistenciesPeriodics(SchemaAwarePeriodicsBase): self._idl = self._nb_idl.idl self._idl.set_lock('ovn_db_inconsistencies_periodics') self._sync_timer = timeutils.StopWatch() - super(DBInconsistenciesPeriodics, self).__init__(ovn_client) + super().__init__(ovn_client) self._resources_func_map = { ovn_const.TYPE_NETWORKS: { @@ -330,7 +330,7 @@ class DBInconsistenciesPeriodics(SchemaAwarePeriodicsBase): else: c[f.resource_type] += 1 - fail_str = ', '.join('{}={}'.format(k, v) for k, v in c.items()) + fail_str = ', '.join(f'{k}={v}' for k, v in c.items()) LOG.debug('Maintenance task: Number of inconsistencies ' 'found at %(type_)s: %(fail_str)s', {'type_': type_, 'fail_str': fail_str}) @@ -503,7 +503,7 @@ class DBInconsistenciesPeriodics(SchemaAwarePeriodicsBase): flood = ls.other_config.get(ovn_const.MCAST_FLOOD_UNREGISTERED) if (not ls.name or (snooping == snooping_conf and - flood == flood_conf)): + flood == flood_conf)): continue cmds.append(self._nb_idl.db_set( @@ -920,14 +920,14 @@ class DBInconsistenciesPeriodics(SchemaAwarePeriodicsBase): for fip in fip_update: lrouter = utils.ovn_name(fip['router_id']) if lrouter not in gw_port_id_cache.keys(): - router_db = self._ovn_client._l3_plugin.get_router(context, - fip['router_id'], fields=['gw_port_id']) + router_db = self._ovn_client._l3_plugin.get_router( + context, fip['router_id'], fields=['gw_port_id']) gw_port_id_cache[lrouter] = router_db.get('gw_port_id') lrp_cache[lrouter] = self._nb_idl.get_lrouter_port( gw_port_id_cache[lrouter]) columns = {'gateway_port': lrp_cache[lrouter].uuid} - cmds.append(self._nb_idl.set_nat_rule_in_lrouter(lrouter, - fip['uuid'], **columns)) + cmds.append(self._nb_idl.set_nat_rule_in_lrouter( + lrouter, fip['uuid'], 
**columns)) if cmds: with self._nb_idl.transaction(check_error=True) as txn: @@ -946,7 +946,7 @@ class DBInconsistenciesPeriodics(SchemaAwarePeriodicsBase): context = n_context.get_admin_context() pra_list = servicetype_obj.ProviderResourceAssociation.get_objects( context, provider_name=provider_name) - pra_res_ids = set(pra.resource_id for pra in pra_list) + pra_res_ids = {pra.resource_id for pra in pra_list} with db_api.CONTEXT_WRITER.using(context): for lr in self._nb_idl.lr_list().execute(check_error=True): router_id = lr.name.replace('neutron-', '') @@ -1029,7 +1029,8 @@ class DBInconsistenciesPeriodics(SchemaAwarePeriodicsBase): ls = self._nb_idl.get_lswitch(ls_name) broadcast_value = ls.other_config.get( ovn_const.LS_OPTIONS_BROADCAST_ARPS_ROUTERS) - expected_broadcast_value = ('true' + expected_broadcast_value = ( + 'true' if ovn_conf.is_broadcast_arps_to_all_routers_enabled() else 'false') # Assert the config value is the right one @@ -1071,8 +1072,8 @@ class DBInconsistenciesPeriodics(SchemaAwarePeriodicsBase): for sroute in sroute_update: lrouter = utils.ovn_name(sroute['name']) if lrouter not in routes_cache.keys(): - router_db = self._ovn_client._l3_plugin.get_router(context, - sroute['name'], fields=['routes']) + router_db = self._ovn_client._l3_plugin.get_router( + context, sroute['name'], fields=['routes']) routes_cache[lrouter] = router_db.get('routes') ovn_route = sroute['sroute'] @@ -1105,7 +1106,7 @@ class DBInconsistenciesPeriodics(SchemaAwarePeriodicsBase): raise periodics.NeverAgain() -class HashRingHealthCheckPeriodics(object): +class HashRingHealthCheckPeriodics: def __init__(self, group): self._group = group diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py index 65b0179aaf5..de53f392c3b 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py @@ -100,7 +100,7 @@ GW_INFO = collections.namedtuple('GatewayInfo', ['network_id', 'subnet_id', 'ip_version', 'ip_prefix']) -class OVNClient(object): +class OVNClient: def __init__(self, nb_idl, sb_idl): self._nb_idl = nb_idl @@ -207,7 +207,7 @@ class OVNClient(object): subnet_opt = subnet_opts.get(opt) if not subnet_opt: return port_opt - return '{%s, %s}' % (subnet_opt[1:-1], port_opt[1:-1]) + return '{{{}, {}}}'.format(subnet_opt[1:-1], port_opt[1:-1]) def _get_port_dhcp_options(self, port, ip_version): """Return dhcp options for port. 
@@ -989,8 +989,8 @@ class OVNClient(object): ).execute(check_error=True) all_lswitches = self._nb_idl.db_find_rows( 'Logical_Switch').execute(check_error=True) - attached_lbs = set( - lb for item in all_lswitches for lb in item.load_balancer) + attached_lbs = { + lb for item in all_lswitches for lb in item.load_balancer} commands = [] for lb in lbs: @@ -1265,8 +1265,8 @@ class OVNClient(object): subnet_id = fixed_ip['subnet_id'] subnet = self._plugin.get_subnet(context, subnet_id) cidr = netaddr.IPNetwork(subnet['cidr']) - networks.add("%s/%s" % (fixed_ip['ip_address'], - str(cidr.prefixlen))) + networks.add("{}/{}".format(fixed_ip['ip_address'], + str(cidr.prefixlen))) if subnet.get('ipv6_address_mode') and not ipv6_ra_configs and ( ipv6_ra_configs_supported): @@ -2076,9 +2076,10 @@ class OVNClient(object): ovn_conf.get_fdb_age_threshold()) if utils.is_external_network(network): params['other_config'][ - ovn_const.LS_OPTIONS_BROADCAST_ARPS_ROUTERS] = ('true' - if ovn_conf.is_broadcast_arps_to_all_routers_enabled() else - 'false') + ovn_const.LS_OPTIONS_BROADCAST_ARPS_ROUTERS] = ( + 'true' + if ovn_conf.is_broadcast_arps_to_all_routers_enabled() else + 'false') return params def create_network(self, context, network): @@ -2127,8 +2128,8 @@ class OVNClient(object): **options)) self._transaction(commands, txn=txn) - def _check_network_changes_in_ha_chassis_groups(self, - context, lswitch, lswitch_params, txn): + def _check_network_changes_in_ha_chassis_groups( + self, context, lswitch, lswitch_params, txn): """Check for changes in the HA Chassis Groups. Check for changes in the HA Chassis Groups upon a network update. @@ -2345,11 +2346,11 @@ class OVNClient(object): routes = [] if metadata_port_ip: - routes.append('%s,%s' % ( + routes.append('{},{}'.format( const.METADATA_V4_CIDR, metadata_port_ip)) # Add subnet host_routes to 'classless_static_route' dhcp option - routes.extend(['%s,%s' % (route['destination'], route['nexthop']) + routes.extend(['{},{}'.format(route['destination'], route['nexthop']) for route in subnet['host_routes']]) if routes: @@ -2653,8 +2654,8 @@ class OVNClient(object): "for network %s", network_id) return False - port_subnet_ids = set(ip['subnet_id'] for ip in - metadata_port['fixed_ips']) + port_subnet_ids = {ip['subnet_id'] for ip in + metadata_port['fixed_ips']} # If this method is called from "create_subnet" or "update_subnet", # only the fixed IP address from this subnet should be updated in the @@ -2673,7 +2674,7 @@ class OVNClient(object): network_id=[network_id], ip_version=[const.IP_VERSION_4], enable_dhcp=[True])) - subnet_ids = set(s['id'] for s in subnets) + subnet_ids = {s['id'] for s in subnets} # Find all subnets where metadata port doesn't have an IP in and # allocate one. 
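A minimal standalone sketch of the two idioms this patch applies throughout the hunks above: set comprehensions instead of set() over a generator, and str.format() instead of %-interpolation. The sample data is invented for illustration and is not taken from the patch; behavior is identical either way:

    fixed_ips = [{'subnet_id': 'a', 'ip_address': '10.0.0.1'},
                 {'subnet_id': 'b', 'ip_address': '10.0.0.2'}]

    # Old spelling, as removed by the hunks above.
    old_ids = set(ip['subnet_id'] for ip in fixed_ips)
    old_net = "%s/%s" % (fixed_ips[0]['ip_address'], 24)

    # New spelling, as added by the hunks above.
    new_ids = {ip['subnet_id'] for ip in fixed_ips}
    new_net = "{}/{}".format(fixed_ips[0]['ip_address'], 24)

    assert old_ids == new_ids
    assert old_net == new_net == '10.0.0.1/24'
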
diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py index 0caedaaffe6..c99b1d7d754 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py @@ -45,7 +45,7 @@ SYNC_MODE_LOG = 'log' SYNC_MODE_REPAIR = 'repair' -class OvnDbSynchronizer(object, metaclass=abc.ABCMeta): +class OvnDbSynchronizer(metaclass=abc.ABCMeta): def __init__(self, core_plugin, ovn_api, ovn_driver): self.ovn_driver = ovn_driver @@ -71,7 +71,7 @@ class OvnNbSynchronizer(OvnDbSynchronizer): """Synchronizer class for NB.""" def __init__(self, core_plugin, ovn_api, sb_ovn, mode, ovn_driver): - super(OvnNbSynchronizer, self).__init__( + super().__init__( core_plugin, ovn_api, ovn_driver) self.mode = mode self.l3_plugin = directory.get_plugin(plugin_constants.L3) @@ -91,7 +91,7 @@ class OvnNbSynchronizer(OvnDbSynchronizer): if utils.is_ovn_l3(self.l3_plugin): self.l3_plugin._nb_ovn.ovsdb_connection.stop() self.l3_plugin._sb_ovn.ovsdb_connection.stop() - super(OvnNbSynchronizer, self).stop() + super().stop() def do_sync(self): if self.mode == SYNC_MODE_OFF: @@ -149,7 +149,7 @@ class OvnNbSynchronizer(OvnDbSynchronizer): @var acl_list_dict: Dictionary of acl-lists based on lport as key @return: acl_list-dict """ - lswitch_names = set([]) + lswitch_names = set() for network in self.core_plugin.get_networks(context): lswitch_names.add(network['id']) acl_dict, ignore1, ignore2 = ( @@ -199,8 +199,8 @@ class OvnNbSynchronizer(OvnDbSynchronizer): if self.mode == SYNC_MODE_REPAIR and (add_pgs or remove_pgs): if add_pgs: db_ports = self.core_plugin.get_ports(ctx) - ovn_ports = set(p.name for p in - self.ovn_api.lsp_list().execute()) + ovn_ports = {p.name for p in + self.ovn_api.lsp_list().execute()} with self.ovn_api.transaction(check_error=True) as txn: pg = ovn_const.OVN_DROP_PORT_GROUP_NAME # Process default drop port group first @@ -1336,7 +1336,7 @@ class OvnSbSynchronizer(OvnDbSynchronizer): """Synchronizer class for SB.""" def __init__(self, core_plugin, ovn_api, ovn_driver): - super(OvnSbSynchronizer, self).__init__( + super().__init__( core_plugin, ovn_api, ovn_driver) self.l3_plugin = directory.get_plugin(plugin_constants.L3) diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovsdb_monitor.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovsdb_monitor.py index c0a8609ef5e..56f379fd9e6 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovsdb_monitor.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovsdb_monitor.py @@ -51,7 +51,7 @@ class BaseEvent(row_event.RowEvent): def __init__(self): self.event_name = self.__class__.__name__ - super(BaseEvent, self).__init__(self.events, self.table, None) + super().__init__(self.events, self.table, None) @abc.abstractmethod def match_fn(self, event, row, old=None): @@ -75,7 +75,7 @@ class ChassisEvent(row_event.RowEvent): self.l3_plugin = directory.get_plugin(constants.L3) table = 'Chassis' events = (self.ROW_CREATE, self.ROW_UPDATE, self.ROW_DELETE) - super(ChassisEvent, self).__init__(events, table, None) + super().__init__(events, table, None) self.event_name = 'ChassisEvent' def _get_ha_chassis_groups_within_azs(self, az_hints): @@ -258,7 +258,7 @@ class PortBindingChassisUpdateEvent(row_event.RowEvent): self.driver = driver table = 'Port_Binding' events = (self.ROW_UPDATE,) - super(PortBindingChassisUpdateEvent, self).__init__( + super().__init__( events, table, None) 
self.event_name = self.__class__.__name__ @@ -489,7 +489,7 @@ class LogicalSwitchPortUpdateUpEvent(row_event.RowEvent): self.driver = driver table = 'Logical_Switch_Port' events = (self.ROW_UPDATE,) - super(LogicalSwitchPortUpdateUpEvent, self).__init__( + super().__init__( events, table, None) self.event_name = 'LogicalSwitchPortUpdateUpEvent' @@ -519,7 +519,7 @@ class LogicalSwitchPortUpdateDownEvent(row_event.RowEvent): self.driver = driver table = 'Logical_Switch_Port' events = (self.ROW_UPDATE,) - super(LogicalSwitchPortUpdateDownEvent, self).__init__( + super().__init__( events, table, None) self.event_name = 'LogicalSwitchPortUpdateDownEvent' @@ -666,7 +666,7 @@ class FIPAddDeleteEvent(row_event.RowEvent): self.driver = driver table = 'NAT' events = (self.ROW_CREATE, self.ROW_DELETE) - super(FIPAddDeleteEvent, self).__init__( + super().__init__( events, table, (('type', '=', 'dnat_and_snat'),)) self.event_name = 'FIPAddDeleteEvent' @@ -727,7 +727,7 @@ class HAChassisGroupRouterEvent(row_event.RowEvent): class OvnDbNotifyHandler(row_event.RowEventHandler): def __init__(self, driver): self.driver = driver - super(OvnDbNotifyHandler, self).__init__() + super().__init__() try: self._lock = self._RowEventHandler__lock self._watched_events = self._RowEventHandler__watched_events @@ -752,7 +752,7 @@ class Ml2OvnIdlBase(connection.OvsdbIdl): def __init__(self, remote, schema, probe_interval=(), **kwargs): if probe_interval == (): # None is a valid value to pass probe_interval = ovn_conf.get_ovn_ovsdb_probe_interval() - super(Ml2OvnIdlBase, self).__init__( + super().__init__( remote, schema, probe_interval=probe_interval, **kwargs) def set_table_condition(self, table_name, condition): @@ -768,7 +768,7 @@ class Ml2OvnIdlBase(connection.OvsdbIdl): class BaseOvnIdl(Ml2OvnIdlBase): def __init__(self, remote, schema, **kwargs): self.notify_handler = row_event.RowEventHandler() - super(BaseOvnIdl, self).__init__(remote, schema, **kwargs) + super().__init__(remote, schema, **kwargs) @classmethod def from_server(cls, connection_string, helper): @@ -797,7 +797,7 @@ class BaseOvnSbIdl(Ml2OvnIdlBase): class OvnIdl(BaseOvnIdl): def __init__(self, driver, remote, schema, **kwargs): - super(OvnIdl, self).__init__(remote, schema, **kwargs) + super().__init__(remote, schema, **kwargs) self.driver = driver self.notify_handler = OvnDbNotifyHandler(driver) @@ -816,7 +816,7 @@ class OvnIdl(BaseOvnIdl): class OvnIdlDistributedLock(BaseOvnIdl): def __init__(self, driver, remote, schema, **kwargs): - super(OvnIdlDistributedLock, self).__init__(remote, schema, **kwargs) + super().__init__(remote, schema, **kwargs) self.driver = driver self.notify_handler = OvnDbNotifyHandler(driver) self._node_uuid = self.driver.node_uuid @@ -870,7 +870,7 @@ class OvnIdlDistributedLock(BaseOvnIdl): class OvnNbIdl(OvnIdlDistributedLock): def __init__(self, driver, remote, schema): - super(OvnNbIdl, self).__init__(driver, remote, schema) + super().__init__(driver, remote, schema) self._lsp_update_up_event = LogicalSwitchPortUpdateUpEvent(driver) self._lsp_update_down_event = LogicalSwitchPortUpdateDownEvent(driver) self._lsp_create_event = LogicalSwitchPortCreateEvent(driver) @@ -912,7 +912,7 @@ class OvnNbIdl(OvnIdlDistributedLock): class OvnSbIdl(OvnIdlDistributedLock): def __init__(self, driver, remote, schema, **kwargs): - super(OvnSbIdl, self).__init__(driver, remote, schema, **kwargs) + super().__init__(driver, remote, schema, **kwargs) self.notify_handler.watch_events([ ChassisAgentDeleteEvent(self.driver), 
ChassisAgentDownEvent(self.driver), diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/worker.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/worker.py index 81ad25a5872..613ce345058 100644 --- a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/worker.py +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/worker.py @@ -24,18 +24,18 @@ class MaintenanceWorker(worker.BaseWorker): super().__init__(desc=self.desc, **kwargs) def start(self, **kwargs): - super(MaintenanceWorker, self).start() + super().start() # NOTE(twilson) The super class will trigger the post_fork_initialize # in the driver, which starts the connection/IDL notify loop which # keeps the process from exiting def stop(self): """Stop service.""" - super(MaintenanceWorker, self).stop() + super().stop() def wait(self): """Wait for service to complete.""" - super(MaintenanceWorker, self).wait() + super().wait() @staticmethod def reset(): diff --git a/neutron/plugins/ml2/drivers/type_flat.py b/neutron/plugins/ml2/drivers/type_flat.py index fd11bc1b8f2..058c443c1f1 100644 --- a/neutron/plugins/ml2/drivers/type_flat.py +++ b/neutron/plugins/ml2/drivers/type_flat.py @@ -43,7 +43,7 @@ class FlatTypeDriver(helpers.BaseTypeDriver): """ def __init__(self): - super(FlatTypeDriver, self).__init__() + super().__init__() self._parse_networks(cfg.CONF.ml2_type_flat.flat_networks) def _parse_networks(self, entries): @@ -129,7 +129,7 @@ class FlatTypeDriver(helpers.BaseTypeDriver): physical_network) def get_mtu(self, physical_network): - seg_mtu = super(FlatTypeDriver, self).get_mtu() + seg_mtu = super().get_mtu() mtu = [] if seg_mtu > 0: mtu.append(seg_mtu) diff --git a/neutron/plugins/ml2/drivers/type_geneve.py b/neutron/plugins/ml2/drivers/type_geneve.py index 9341ff15616..4e342767d5c 100644 --- a/neutron/plugins/ml2/drivers/type_geneve.py +++ b/neutron/plugins/ml2/drivers/type_geneve.py @@ -33,8 +33,8 @@ driver_type.register_ml2_drivers_geneve_opts() class GeneveTypeDriver(type_tunnel.EndpointTunnelTypeDriver): def __init__(self): - super(GeneveTypeDriver, self).__init__(geneve_obj.GeneveAllocation, - geneve_obj.GeneveEndpoint) + super().__init__(geneve_obj.GeneveAllocation, + geneve_obj.GeneveEndpoint) self.max_encap_size = cfg.CONF.ml2_type_geneve.max_header_size self.model_segmentation_id = ( geneve_alloc_model.GeneveAllocation.geneve_vni) @@ -60,5 +60,5 @@ class GeneveTypeDriver(type_tunnel.EndpointTunnelTypeDriver): return self._add_endpoint(ip, host) def get_mtu(self, physical_network=None): - mtu = super(GeneveTypeDriver, self).get_mtu() + mtu = super().get_mtu() return mtu - self.max_encap_size if mtu else 0 diff --git a/neutron/plugins/ml2/drivers/type_gre.py b/neutron/plugins/ml2/drivers/type_gre.py index 45561acd4b1..28664c2fbc7 100644 --- a/neutron/plugins/ml2/drivers/type_gre.py +++ b/neutron/plugins/ml2/drivers/type_gre.py @@ -32,7 +32,7 @@ driver_type.register_ml2_drivers_gre_opts() class GreTypeDriver(type_tunnel.EndpointTunnelTypeDriver): def __init__(self): - super(GreTypeDriver, self).__init__( + super().__init__( gre_obj.GreAllocation, gre_obj.GreEndpoint) self.model_segmentation_id = gre_alloc_model.GreAllocation.gre_id @@ -58,5 +58,5 @@ class GreTypeDriver(type_tunnel.EndpointTunnelTypeDriver): return self._add_endpoint(ip, host) def get_mtu(self, physical_network=None): - mtu = super(GreTypeDriver, self).get_mtu(physical_network) + mtu = super().get_mtu(physical_network) return mtu - p_const.GRE_ENCAP_OVERHEAD if mtu else 0 diff --git a/neutron/plugins/ml2/drivers/type_tunnel.py 
b/neutron/plugins/ml2/drivers/type_tunnel.py index dcdadbcc394..7384a16bba4 100644 --- a/neutron/plugins/ml2/drivers/type_tunnel.py +++ b/neutron/plugins/ml2/drivers/type_tunnel.py @@ -56,7 +56,7 @@ class _TunnelTypeDriverBase(helpers.SegmentTypeDriver, metaclass=abc.ABCMeta): BULK_SIZE = 100 def __init__(self, model): - super(_TunnelTypeDriverBase, self).__init__(model) + super().__init__(model) self.segmentation_key = next(iter(self.primary_keys)) @abc.abstractmethod @@ -240,7 +240,7 @@ class _TunnelTypeDriverBase(helpers.SegmentTypeDriver, metaclass=abc.ABCMeta): raise exc.InvalidInput(error_message=msg) def get_mtu(self, physical_network=None): - seg_mtu = super(_TunnelTypeDriverBase, self).get_mtu() + seg_mtu = super().get_mtu() mtu = [] if seg_mtu > 0: mtu.append(seg_mtu) @@ -339,7 +339,7 @@ class ML2TunnelTypeDriver(_TunnelTypeDriverBase, metaclass=abc.ABCMeta): class EndpointTunnelTypeDriver(ML2TunnelTypeDriver): def __init__(self, segment_model, endpoint_model): - super(EndpointTunnelTypeDriver, self).__init__(segment_model) + super().__init__(segment_model) self.endpoint_model = endpoint_model.db_model self.segmentation_key = next(iter(self.primary_keys)) @@ -396,7 +396,7 @@ class EndpointTunnelTypeDriver(ML2TunnelTypeDriver): return endpoint -class TunnelRpcCallbackMixin(object): +class TunnelRpcCallbackMixin: def setup_tunnel_callback_mixin(self, notifier, type_manager): self._notifier = notifier @@ -489,7 +489,7 @@ class TunnelRpcCallbackMixin(object): raise exc.InvalidInput(error_message=msg) -class TunnelAgentRpcApiMixin(object): +class TunnelAgentRpcApiMixin: def _get_tunnel_update_topic(self): return topics.get_topic_name(self.topic, diff --git a/neutron/plugins/ml2/drivers/type_vlan.py b/neutron/plugins/ml2/drivers/type_vlan.py index b71447b6501..55e4f6f03d2 100644 --- a/neutron/plugins/ml2/drivers/type_vlan.py +++ b/neutron/plugins/ml2/drivers/type_vlan.py @@ -53,7 +53,7 @@ class VlanTypeDriver(helpers.SegmentTypeDriver): """ def __init__(self): - super(VlanTypeDriver, self).__init__(vlanalloc.VlanAllocation) + super().__init__(vlanalloc.VlanAllocation) self.model_segmentation_id = vlan_alloc_model.VlanAllocation.vlan_id self._parse_network_vlan_ranges() @@ -320,7 +320,7 @@ class VlanTypeDriver(helpers.SegmentTypeDriver): 'physical_network': physical_network}) def get_mtu(self, physical_network): - seg_mtu = super(VlanTypeDriver, self).get_mtu() + seg_mtu = super().get_mtu() mtu = [] if seg_mtu > 0: mtu.append(seg_mtu) diff --git a/neutron/plugins/ml2/drivers/type_vxlan.py b/neutron/plugins/ml2/drivers/type_vxlan.py index 4b77f35d256..242c4a00828 100644 --- a/neutron/plugins/ml2/drivers/type_vxlan.py +++ b/neutron/plugins/ml2/drivers/type_vxlan.py @@ -31,7 +31,7 @@ driver_type.register_ml2_drivers_vxlan_opts() class VxlanTypeDriver(type_tunnel.EndpointTunnelTypeDriver): def __init__(self): - super(VxlanTypeDriver, self).__init__( + super().__init__( vxlan_obj.VxlanAllocation, vxlan_obj.VxlanEndpoint) self.model_segmentation_id = ( vxlan_alloc_model.VxlanAllocation.vxlan_vni) @@ -59,5 +59,5 @@ class VxlanTypeDriver(type_tunnel.EndpointTunnelTypeDriver): return self._add_endpoint(ip, host, udp_port=udp_port) def get_mtu(self, physical_network=None): - mtu = super(VxlanTypeDriver, self).get_mtu() + mtu = super().get_mtu() return mtu - p_const.VXLAN_ENCAP_OVERHEAD if mtu else 0 diff --git a/neutron/plugins/ml2/extensions/dns_integration.py b/neutron/plugins/ml2/extensions/dns_integration.py index 338997daacb..edbf0d75ff3 100644 --- 
a/neutron/plugins/ml2/extensions/dns_integration.py +++ b/neutron/plugins/ml2/extensions/dns_integration.py @@ -309,13 +309,13 @@ class DNSExtensionDriver(api.ExtensionDriver): hostname = dns_name fqdn = dns_name if not dns_name.endswith('.'): - fqdn = '%s.%s' % (dns_name, dns_domain) + fqdn = '{}.{}'.format(dns_name, dns_domain) else: hostname = 'host-%s' % ip['ip_address'].replace( '.', '-').replace(':', '-') fqdn = hostname if dns_domain: - fqdn = '%s.%s' % (hostname, dns_domain) + fqdn = '{}.{}'.format(hostname, dns_domain) dns_assignment.append({'ip_address': ip['ip_address'], 'hostname': hostname, 'fqdn': fqdn}) @@ -417,7 +417,7 @@ class DNSDomainPortsExtensionDriver(DNSExtensionDriverML2): def extend_port_dict(self, session, db_data, response_data): response_data = ( - super(DNSDomainPortsExtensionDriver, self).extend_port_dict( + super().extend_port_dict( session, db_data, response_data)) dns_data_db = db_data.dns response_data[dns_apidef.DNSDOMAIN] = '' diff --git a/neutron/plugins/ml2/managers.py b/neutron/plugins/ml2/managers.py index 9fa1c186d8d..6e4fa2a39dd 100644 --- a/neutron/plugins/ml2/managers.py +++ b/neutron/plugins/ml2/managers.py @@ -51,9 +51,9 @@ class TypeManager(stevedore.named.NamedExtensionManager): LOG.info("Configured type driver names: %s", cfg.CONF.ml2.type_drivers) - super(TypeManager, self).__init__('neutron.ml2.type_drivers', - cfg.CONF.ml2.type_drivers, - invoke_on_load=True) + super().__init__('neutron.ml2.type_drivers', + cfg.CONF.ml2.type_drivers, + invoke_on_load=True) LOG.info("Loaded type driver names: %s", self.names()) self._register_types() self._check_tenant_network_types(cfg.CONF.ml2.tenant_network_types) @@ -417,7 +417,7 @@ class MechanismManager(stevedore.named.NamedExtensionManager): LOG.info("Configured mechanism driver names: %s", cfg.CONF.ml2.mechanism_drivers) - super(MechanismManager, self).__init__( + super().__init__( 'neutron.ml2.mechanism_drivers', cfg.CONF.ml2.mechanism_drivers, invoke_on_load=True, @@ -1095,10 +1095,10 @@ class ExtensionManager(stevedore.named.NamedExtensionManager): LOG.info("Configured extension driver names: %s", cfg.CONF.ml2.extension_drivers) - super(ExtensionManager, self).__init__('neutron.ml2.extension_drivers', - cfg.CONF.ml2.extension_drivers, - invoke_on_load=True, - name_order=True) + super().__init__('neutron.ml2.extension_drivers', + cfg.CONF.ml2.extension_drivers, + invoke_on_load=True, + name_order=True) LOG.info("Loaded extension driver names: %s", self.names()) self._register_drivers() diff --git a/neutron/plugins/ml2/ovo_rpc.py b/neutron/plugins/ml2/ovo_rpc.py index 6354fae3a29..55d08574f74 100644 --- a/neutron/plugins/ml2/ovo_rpc.py +++ b/neutron/plugins/ml2/ovo_rpc.py @@ -36,7 +36,7 @@ LOG = logging.getLogger(__name__) # deprecated, it could be possible to revert to the previous architecture using # preemptive threads. # [1] https://review.opendev.org/c/openstack/neutron/+/926922 -class _ObjectChangeHandler(object): +class _ObjectChangeHandler: def __init__(self, resource, object_class, resource_push_api): self._resource = resource @@ -105,7 +105,7 @@ class _ObjectChangeHandler(object): {'res': self._resource, 'e': e}) -class OVOServerRpcInterface(object): +class OVOServerRpcInterface: """ML2 server-side RPC interface. Generates RPC callback notifications on ML2 object changes. 
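The `_ObjectChangeHandler(object)` and `OVOServerRpcInterface(object)` rewrites above, like the many similar ones elsewhere in the patch, only drop a base class that Python 3 adds implicitly: every class is new-style and already inherits from `object`. A quick standalone check (illustrative, not part of the patch):

    class Implicit:
        pass

    class Explicit(object):
        pass

    # Both forms produce the same method resolution order in Python 3.
    assert Implicit.__mro__ == (Implicit, object)
    assert Explicit.__mro__ == (Explicit, object)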
diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py index f0a23b7fc82..55cf448e764 100644 --- a/neutron/plugins/ml2/plugin.py +++ b/neutron/plugins/ml2/plugin.py @@ -278,7 +278,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, query_hook=None, filter_hook=None, result_filters=_ml2_port_result_filter_hook) - return super(Ml2Plugin, cls).__new__(cls, *args, **kwargs) + return super().__new__(cls, *args, **kwargs) @resource_registry.tracked_resources( network=models_v2.Network, @@ -292,7 +292,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, self.type_manager = managers.TypeManager() self.extension_manager = managers.ExtensionManager() self.mechanism_manager = managers.MechanismManager() - super(Ml2Plugin, self).__init__() + super().__init__() self.type_manager.initialize() self.extension_manager.initialize() self.mechanism_manager.initialize() @@ -596,7 +596,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, self._clear_port_binding(mech_context, binding, port, original_host) port['status'] = const.PORT_STATUS_DOWN - super(Ml2Plugin, self).update_port( + super().update_port( mech_context.plugin_context, port['id'], {port_def.RESOURCE_NAME: {'status': const.PORT_STATUS_DOWN}}) @@ -1276,7 +1276,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, self._update_provider_network_attributes( context, original_network, net_data) - updated_network = super(Ml2Plugin, self).update_network( + updated_network = super().update_network( context, id, network, db_network=db_network) self.extension_manager.process_update_network(context, net_data, updated_network) @@ -1337,7 +1337,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, def get_networks(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): with db_api.CONTEXT_READER.using(context): - nets_db = super(Ml2Plugin, self)._get_networks( + nets_db = super()._get_networks( context, filters, None, sorts, limit, marker, page_reverse) net_data = [] @@ -1369,7 +1369,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, def delete_network(self, context, id): # the only purpose of this override is to protect this from being # called inside of a transaction. - return super(Ml2Plugin, self).delete_network(context, id) + return super().delete_network(context, id) # NOTE(mgoddard): Use a priority of zero to ensure this handler runs before # other precommit handlers. This is necessary to ensure we avoid another @@ -1493,7 +1493,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, def delete_subnet(self, context, id): # the only purpose of this override is to protect this from being # called inside of a transaction. - return super(Ml2Plugin, self).delete_subnet(context, id) + return super().delete_subnet(context, id) # NOTE(mgoddard): Use a priority of zero to ensure this handler runs before # other precommit handlers. This is necessary to ensure we avoid another @@ -1875,7 +1875,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, # that port does not have any security groups already on it. 
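The string-formatting conversions (for example the `fqdn` assembly in `dns_integration.py` above, and the `policy.py` rule names further down) swap `%`-interpolation for `str.format` without changing the resulting strings. A small equivalence check using invented sample values:

    dns_name, dns_domain = 'vm-1', 'example.org.'

    old_style = '%s.%s' % (dns_name, dns_domain)
    new_style = '{}.{}'.format(dns_name, dns_domain)
    f_string = f'{dns_name}.{dns_domain}'   # the shortest modern spelling

    assert old_style == new_style == f_string == 'vm-1.example.org.'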
filters = {'port_id': [id]} security_groups = ( - super(Ml2Plugin, self)._get_port_security_group_bindings( + super()._get_port_security_group_bindings( context, filters)) if security_groups: raise psec_exc.PortSecurityPortHasSecurityGroup() @@ -1929,9 +1929,9 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, port_db, attrs, binding) need_port_update_notify |= mac_address_updated original_port = self._make_port_dict(port_db) - updated_port = super(Ml2Plugin, self).update_port(context, id, - port, - db_port=port_db) + updated_port = super().update_port(context, id, + port, + db_port=port_db) self.extension_manager.process_update_port(context, attrs, updated_port) self._portsec_ext_port_update_processing(updated_port, context, @@ -2241,7 +2241,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, LOG.debug("Calling delete_port for %(port_id)s owned by %(owner)s", {"port_id": id, "owner": device_owner}) - super(Ml2Plugin, self).delete_port(context, id, port) + super().delete_port(context, id, port) self._post_delete_port( context, port, router_ids, bound_mech_contexts) @@ -2517,9 +2517,9 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, @db_api.retry_if_session_inactive() def get_ports_from_devices(self, context, devices): - port_ids_to_devices = dict( - (self._device_to_port_id(context, device), device) - for device in devices) + port_ids_to_devices = { + self._device_to_port_id(context, device): device + for device in devices} port_ids = list(port_ids_to_devices.keys()) ports = db.get_ports_and_sgs(context, port_ids) for port in ports: @@ -2563,9 +2563,9 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, filters['id'] = [entry['port_id'] for entry in port_bindings] fixed_ips = filters.get('fixed_ips', {}) ip_addresses_s = fixed_ips.get('ip_address_substr') - query = super(Ml2Plugin, self)._get_ports_query(context, *args, - filters=filters, - **kwargs) + query = super()._get_ports_query(context, *args, + filters=filters, + **kwargs) if ip_addresses_s: substr_filter = or_(*[models_v2.Port.fixed_ips.any( models_v2.IPAllocation.ip_address.like('%%%s%%' % ip)) @@ -2732,7 +2732,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, mech_context._binding, port_dict, original_host) port_dict['status'] = const.PORT_STATUS_DOWN - super(Ml2Plugin, self).update_port( + super().update_port( mech_context.plugin_context, port_dict['id'], {port_def.RESOURCE_NAME: {'status': const.PORT_STATUS_DOWN}}) self._update_port_dict_binding(port_dict, @@ -2888,7 +2888,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, self._clear_port_binding(original_context, active_binding, port_dict, active_binding.host) port_dict['status'] = const.PORT_STATUS_DOWN - super(Ml2Plugin, self).update_port( + super().update_port( context, port_dict['id'], {port_def.RESOURCE_NAME: {'status': const.PORT_STATUS_DOWN}}) levels = db.get_binding_level_objs(context, port_id, diff --git a/neutron/plugins/ml2/rpc.py b/neutron/plugins/ml2/rpc.py index be298e73634..94ff8ad9fdd 100644 --- a/neutron/plugins/ml2/rpc.py +++ b/neutron/plugins/ml2/rpc.py @@ -69,7 +69,7 @@ class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin): def __init__(self, notifier, type_manager): self.setup_tunnel_callback_mixin(notifier, type_manager) - super(RpcCallbacks, self).__init__() + super().__init__() def _get_new_status(self, host, port_context): port = port_context.current diff --git a/neutron/policy.py b/neutron/policy.py index 79c4307c51a..64ed68d9005 100644 --- a/neutron/policy.py +++ b/neutron/policy.py @@ -179,7 +179,7 @@ def 
_build_list_of_subattrs_rule(attr_name, attribute_value, action): if isinstance(sub_attr, dict): for k in sub_attr: rules.append(policy.RuleCheck( - 'rule', '%s:%s:%s' % (action, attr_name, k))) + 'rule', '{}:{}:{}'.format(action, attr_name, k))) if rules: return policy.AndCheck(rules) @@ -224,7 +224,7 @@ def _build_match_rule(action, target, pluralized): attribute = res_map[resource][attribute_name] if 'enforce_policy' in attribute: attr_rule = policy.RuleCheck( - 'rule', '%s:%s' % (action, attribute_name)) + 'rule', '{}:{}'.format(action, attribute_name)) # Build match entries for sub-attributes if _should_validate_sub_attributes( attribute, target[attribute_name]): @@ -274,10 +274,10 @@ class OwnerCheck(policy.Check): match) LOG.exception(err_reason) raise exceptions.PolicyInitError( - policy="%s:%s" % (kind, match), + policy="{}:{}".format(kind, match), reason=err_reason) self._cache = cache._get_memory_cache_region(expiration_time=5) - super(OwnerCheck, self).__init__(kind, match) + super().__init__(kind, match) # NOTE(slaweq): It seems we need to have it like that, otherwise we hit # TypeError: cannot pickle '_thread.RLock' object @@ -341,13 +341,14 @@ class OwnerCheck(policy.Check): self.target_field) LOG.error(err_reason) raise exceptions.PolicyCheckError( - policy="%s:%s" % (self.kind, self.match), + policy="{}:{}".format(self.kind, self.match), reason=err_reason) parent_foreign_key = _RESOURCE_FOREIGN_KEYS.get( "%ss" % parent_res, None) if parent_res == constants.EXT_PARENT_PREFIX: for resource in service_const.EXT_PARENT_RESOURCE_MAPPING: - key = "%s_%s_id" % (constants.EXT_PARENT_PREFIX, resource) + key = "{}_{}_id".format( + constants.EXT_PARENT_PREFIX, resource) if key in target: parent_foreign_key = key parent_res = resource @@ -358,7 +359,7 @@ class OwnerCheck(policy.Check): {'match': self.match, 'res': parent_res}) LOG.error(err_reason) raise exceptions.PolicyCheckError( - policy="%s:%s" % (self.kind, self.match), + policy="{}:{}".format(self.kind, self.match), reason=err_reason) target[self.target_field] = self._extract( @@ -380,8 +381,8 @@ class FieldCheck(policy.Check): resource, field_value = match.split(':', 1) field, value = field_value.split('=', 1) - super(FieldCheck, self).__init__(kind, '%s:%s:%s' % - (resource, field, value)) + super().__init__(kind, '%s:%s:%s' % + (resource, field, value)) # Value might need conversion - we need help from the attribute map diff --git a/neutron/privileged/agent/linux/ip_lib.py b/neutron/privileged/agent/linux/ip_lib.py index f889b795dd1..39213ed23a4 100644 --- a/neutron/privileged/agent/linux/ip_lib.py +++ b/neutron/privileged/agent/linux/ip_lib.py @@ -58,7 +58,7 @@ class NetworkNamespaceNotFound(RuntimeError): message = _("Network namespace %(netns_name)s could not be found.") def __init__(self, netns_name): - super(NetworkNamespaceNotFound, self).__init__( + super().__init__( self.message % {'netns_name': netns_name}) @@ -74,7 +74,7 @@ class NetworkInterfaceNotFound(RuntimeError): # raised exception. message = message or self.message % { 'device': device, 'namespace': namespace} - super(NetworkInterfaceNotFound, self).__init__(message) + super().__init__(message) class InterfaceOperationNotSupported(RuntimeError): @@ -89,7 +89,7 @@ class InterfaceOperationNotSupported(RuntimeError): # raised exception. 
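Alongside the formatting changes, the patch rewrites `dict(...)` and `set([...])` constructor calls over generator expressions as comprehension literals, as in `get_ports_from_devices` above; both spellings build identical objects, the literal form simply avoids the intermediate generator-to-constructor hop. A hedged sketch with made-up data and a stand-in lookup function:

    devices = ['tap1', 'tap2']

    def device_to_port_id(device):
        # Stand-in for the real lookup, for illustration only.
        return 'port-' + device

    legacy = dict((device_to_port_id(d), d) for d in devices)
    modern = {device_to_port_id(d): d for d in devices}

    assert legacy == modern == {'port-tap1': 'tap1', 'port-tap2': 'tap2'}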
message = message or self.message % { 'device': device, 'namespace': namespace} - super(InterfaceOperationNotSupported, self).__init__(message) + super().__init__(message) class InvalidArgument(RuntimeError): @@ -104,7 +104,7 @@ class InvalidArgument(RuntimeError): # raised exception. message = message or self.message % {'device': device, 'namespace': namespace} - super(InvalidArgument, self).__init__(message) + super().__init__(message) class IpAddressAlreadyExists(RuntimeError): @@ -117,7 +117,7 @@ class IpAddressAlreadyExists(RuntimeError): # and will call it always with passing only message from originally # raised exception. message = message or self.message % {'ip': ip, 'device': device} - super(IpAddressAlreadyExists, self).__init__(message) + super().__init__(message) class InterfaceAlreadyExists(RuntimeError): @@ -130,7 +130,7 @@ class InterfaceAlreadyExists(RuntimeError): # and will call it always with passing only message from originally # raised exception. message = message or self.message % {'device': device} - super(InterfaceAlreadyExists, self).__init__(message) + super().__init__(message) def _make_route_dict(destination, nexthop, device, scope): @@ -689,8 +689,8 @@ def list_ip_rules(namespace, ip_version, match=None, **kwargs): family=_IP_VERSION_FAMILY_MAP[ip_version], match=match, **kwargs)) for rule in rules: - rule['attrs'] = dict( - (item[0], item[1]) for item in rule['attrs']) + rule['attrs'] = { + item[0]: item[1] for item in rule['attrs']} return rules except OSError as e: diff --git a/neutron/privileged/agent/linux/netlink_lib.py b/neutron/privileged/agent/linux/netlink_lib.py index 32ea0507d47..2042f1d46c2 100644 --- a/neutron/privileged/agent/linux/netlink_lib.py +++ b/neutron/privileged/agent/linux/netlink_lib.py @@ -125,7 +125,7 @@ NFCT_CALLBACK = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p) -class ConntrackManager(object): +class ConntrackManager: def __init__(self, family_socket=None): self.family_socket = family_socket self.set_functions = { @@ -278,7 +278,7 @@ def list_entries(zone): with ConntrackManager(nl_constants.IPVERSION_SOCKET[ipversion]) \ as conntrack: raw_entries = [entry for entry in conntrack.list_entries() if - re.search(r'\bzone={}\b'.format(zone), entry) is + re.search(fr'\bzone={zone}\b', entry) is not None] for raw_entry in raw_entries: diff --git a/neutron/privileged/agent/linux/tc_lib.py b/neutron/privileged/agent/linux/tc_lib.py index ecc85a0ee2d..d4477ab10ca 100644 --- a/neutron/privileged/agent/linux/tc_lib.py +++ b/neutron/privileged/agent/linux/tc_lib.py @@ -36,7 +36,7 @@ class TrafficControlClassNotFound(RuntimeError): def __init__(self, message=None, classid=None, namespace=None): message = message or self.message % { 'classid': classid, 'namespace': namespace} - super(TrafficControlClassNotFound, self).__init__(message) + super().__init__(message) @privileged.default.entrypoint diff --git a/neutron/privileged/agent/ovsdb/native/helpers.py b/neutron/privileged/agent/ovsdb/native/helpers.py index 4420aeaf968..f5650c07589 100644 --- a/neutron/privileged/agent/ovsdb/native/helpers.py +++ b/neutron/privileged/agent/ovsdb/native/helpers.py @@ -23,8 +23,8 @@ def _connection_to_manager_uri(conn_uri): ip, port = netutils.parse_host_port(addr) if port is not None: ip = netutils.escape_ipv6(ip) - return 'p%s:%s:%s' % (proto, port, ip) - return 'p%s:%s' % (proto, addr) + return 'p{}:{}:{}'.format(proto, port, ip) + return 'p{}:{}'.format(proto, addr) @privileged.ovs_vsctl_cmd.entrypoint diff --git 
a/neutron/profiling/profiled_decorator.py b/neutron/profiling/profiled_decorator.py index 69a84aeefed..9211ed58867 100644 --- a/neutron/profiling/profiled_decorator.py +++ b/neutron/profiling/profiled_decorator.py @@ -45,7 +45,7 @@ def profile(f): if not cfg.CONF.enable_code_profiling: return f(*args, **kwargs) - profid = "%s.%s" % (f.__module__, f.__name__) + profid = "{}.{}".format(f.__module__, f.__name__) profiler = cProfile.Profile() start_time = datetime.now() try: diff --git a/neutron/quota/__init__.py b/neutron/quota/__init__.py index 2ad86232bbc..587a08f6e4c 100644 --- a/neutron/quota/__init__.py +++ b/neutron/quota/__init__.py @@ -27,7 +27,7 @@ LOG = logging.getLogger(__name__) quota.register_quota_opts(quota.core_quota_opts) -class QuotaEngine(object): +class QuotaEngine: """Represent the set of recognized quotas.""" _instance = None @@ -81,8 +81,8 @@ class QuotaEngine(object): requested_resources = set(deltas.keys()) all_resources = resource_registry.get_all_resources() - managed_resources = set([res for res in all_resources.keys() - if res in requested_resources]) + managed_resources = {res for res in all_resources.keys() + if res in requested_resources} # Make sure we accounted for all of them... unknown_resources = requested_resources - managed_resources @@ -130,9 +130,9 @@ class QuotaEngine(object): # TODO(salv-orlando): Deprecate calls to this API # Verify that resources are managed by the quota engine requested_resources = set(values.keys()) - managed_resources = set([res for res in - resource_registry.get_all_resources() - if res in requested_resources]) + managed_resources = {res for res in + resource_registry.get_all_resources() + if res in requested_resources} # Make sure we accounted for all of them... unknown_resources = requested_resources - managed_resources diff --git a/neutron/quota/resource.py b/neutron/quota/resource.py index 82a3412151a..baeb5e2e660 100644 --- a/neutron/quota/resource.py +++ b/neutron/quota/resource.py @@ -58,7 +58,7 @@ def _count_resource(context, collection_name, project_id): _('No plugins that support counting %s found.') % collection_name) -class BaseResource(object, metaclass=abc.ABCMeta): +class BaseResource(metaclass=abc.ABCMeta): """Describe a single resource for quota checking.""" def __init__(self, name, flag, plural_name=None): @@ -147,7 +147,7 @@ class CountableResource(BaseResource): Dashes are always converted to underscores. """ - super(CountableResource, self).__init__( + super().__init__( name, flag=flag, plural_name=plural_name) self._count_func = count @@ -188,7 +188,7 @@ class TrackedResource(BaseResource): Dashes are always converted to underscores. """ - super(TrackedResource, self).__init__( + super().__init__( name, flag=flag, plural_name=plural_name) # Register events for addition/removal of records in the model class # As project_id is immutable for all Neutron objects there is no need diff --git a/neutron/quota/resource_registry.py b/neutron/quota/resource_registry.py index 73a925c1ad6..e207e4d3869 100644 --- a/neutron/quota/resource_registry.py +++ b/neutron/quota/resource_registry.py @@ -99,7 +99,7 @@ def mark_resources_dirty(f): return wrapper -class tracked_resources(object): +class tracked_resources: """Decorator for specifying resources for which usage should be tracked. 
A plugin class can use this decorator to specify for which resources @@ -126,7 +126,7 @@ class tracked_resources(object): return wrapper -class ResourceRegistry(object): +class ResourceRegistry: """Registry for resource subject to quota limits. This class keeps track of Neutron resources for which quota limits are diff --git a/neutron/scheduler/base_resource_filter.py b/neutron/scheduler/base_resource_filter.py index 133ce7271a4..e1aa0541fc4 100644 --- a/neutron/scheduler/base_resource_filter.py +++ b/neutron/scheduler/base_resource_filter.py @@ -21,7 +21,7 @@ from oslo_db import exception as db_exc from neutron.common import utils as n_utils -class BaseResourceFilter(object, metaclass=abc.ABCMeta): +class BaseResourceFilter(metaclass=abc.ABCMeta): """Encapsulate logic that is specific to the resource type.""" @abc.abstractmethod def filter_agents(self, plugin, context, resource): diff --git a/neutron/scheduler/base_scheduler.py b/neutron/scheduler/base_scheduler.py index 6b3abb14167..b17e6c1f1cd 100644 --- a/neutron/scheduler/base_scheduler.py +++ b/neutron/scheduler/base_scheduler.py @@ -22,7 +22,7 @@ from oslo_log import log as logging LOG = logging.getLogger(__name__) -class BaseScheduler(object, metaclass=abc.ABCMeta): +class BaseScheduler(metaclass=abc.ABCMeta): """The base scheduler (agnostic to resource type). Child classes of BaseScheduler must define the self.resource_filter to filter agents of diff --git a/neutron/scheduler/dhcp_agent_scheduler.py b/neutron/scheduler/dhcp_agent_scheduler.py index ffa48045b4f..f1ecdd63df9 100644 --- a/neutron/scheduler/dhcp_agent_scheduler.py +++ b/neutron/scheduler/dhcp_agent_scheduler.py @@ -34,7 +34,7 @@ from neutron.scheduler import base_scheduler LOG = logging.getLogger(__name__) -class AutoScheduler(object): +class AutoScheduler: def auto_schedule_networks(self, plugin, context, host): """Schedule non-hosted networks to the DHCP agent on the specified @@ -100,8 +100,8 @@ class AutoScheduler(object): self.resource_filter.bind( context, [agent], net_id, force_scheduling=is_routed_network) - debug_data.append('(%s, %s, %s)' % (agent['agent_type'], - agent['host'], net_id)) + debug_data.append('({}, {}, {})'.format(agent['agent_type'], + agent['host'], net_id)) LOG.debug('Resources bound (agent type, host, resource id): %s', ', '.join(debug_data)) return True @@ -110,13 +110,13 @@ class AutoScheduler(object): class ChanceScheduler(base_scheduler.BaseChanceScheduler, AutoScheduler): def __init__(self): - super(ChanceScheduler, self).__init__(DhcpFilter()) + super().__init__(DhcpFilter()) class WeightScheduler(base_scheduler.BaseWeightScheduler, AutoScheduler): def __init__(self): - super(WeightScheduler, self).__init__(DhcpFilter()) + super().__init__(DhcpFilter()) class AZAwareWeightScheduler(WeightScheduler): @@ -145,7 +145,7 @@ class AZAwareWeightScheduler(WeightScheduler): # resource_hostable_agents should be a list with agents in the order of # their weight. 
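Abstract bases such as `BaseResource`, `BaseResourceFilter` and `BaseScheduler` above (and `L3Scheduler`, `OVNGatewayScheduler` below) keep `metaclass=abc.ABCMeta` and merely shed the redundant explicit `object` base; abstract-method enforcement is unchanged. A minimal sketch of the pattern, with invented class names:

    import abc

    class BaseFilter(metaclass=abc.ABCMeta):
        @abc.abstractmethod
        def filter_agents(self, agents):
            """Return the subset of agents that can host the resource."""

    class AcceptAllFilter(BaseFilter):
        def filter_agents(self, agents):
            return list(agents)

    try:
        BaseFilter()            # the ABC still refuses to instantiate
    except TypeError:
        pass

    assert AcceptAllFilter().filter_agents(('a', 'b')) == ['a', 'b']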
resource_hostable_agents = ( - super(AZAwareWeightScheduler, self).select( + super().select( plugin, context, resource_hostable_agents, resource_hosted_agents, len(resource_hostable_agents))) for agent in resource_hostable_agents: @@ -231,7 +231,7 @@ class DhcpFilter(base_resource_filter.BaseResourceFilter): {'network_id': network_id, 'agent_id': agent_id, 'binding_index': binding_index}) - super(DhcpFilter, self).bind(context, bound_agents, network_id) + super().bind(context, bound_agents, network_id) def filter_agents(self, plugin, context, network): """Return the agents that can host the network. diff --git a/neutron/scheduler/l3_agent_scheduler.py b/neutron/scheduler/l3_agent_scheduler.py index 3e7fa018592..62bbf16cfc6 100644 --- a/neutron/scheduler/l3_agent_scheduler.py +++ b/neutron/scheduler/l3_agent_scheduler.py @@ -38,7 +38,7 @@ LOG = logging.getLogger(__name__) cfg.CONF.register_opts(l3_hamode_db.L3_HA_OPTS) -class L3Scheduler(object, metaclass=abc.ABCMeta): +class L3Scheduler(metaclass=abc.ABCMeta): def __init__(self): self.max_ha_agents = cfg.CONF.max_l3_agents_per_router @@ -376,13 +376,13 @@ class AZLeastRoutersScheduler(LeastRoutersScheduler): if not target_routers: return [] - return super(AZLeastRoutersScheduler, self)._get_routers_can_schedule( + return super()._get_routers_can_schedule( plugin, context, target_routers, l3_agent) def _get_candidates(self, plugin, context, sync_router): """Overwrite L3Scheduler's method to filter by availability zone.""" all_candidates = ( - super(AZLeastRoutersScheduler, self)._get_candidates( + super()._get_candidates( plugin, context, sync_router)) candidates = [] diff --git a/neutron/scheduler/l3_ovn_scheduler.py b/neutron/scheduler/l3_ovn_scheduler.py index 36bfd75f4ec..338095417bf 100644 --- a/neutron/scheduler/l3_ovn_scheduler.py +++ b/neutron/scheduler/l3_ovn_scheduler.py @@ -29,7 +29,7 @@ OVN_SCHEDULER_CHANCE = 'chance' OVN_SCHEDULER_LEAST_LOADED = 'leastloaded' -class OVNGatewayScheduler(object, metaclass=abc.ABCMeta): +class OVNGatewayScheduler(metaclass=abc.ABCMeta): def __init__(self): pass @@ -142,8 +142,9 @@ class OVNGatewayChanceScheduler(OVNGatewayScheduler): def select(self, nb_idl, sb_idl, gateway_name, candidates=None, existing_chassis=None, target_lrouter=None): - return self._schedule_gateway(nb_idl, sb_idl, gateway_name, - candidates, existing_chassis, target_lrouter) + return self._schedule_gateway( + nb_idl, sb_idl, gateway_name, + candidates, existing_chassis, target_lrouter) def _select_gateway_chassis(self, nb_idl, sb_idl, candidates, priority_min, priority_max, target_lrouter): diff --git a/neutron/service.py b/neutron/service.py index c1bca3cc5bd..c53ce4f983f 100644 --- a/neutron/service.py +++ b/neutron/service.py @@ -51,7 +51,7 @@ service.register_service_opts(service.RPC_EXTRA_OPTS) LOG = logging.getLogger(__name__) -class WsgiService(object): +class WsgiService: """Base class for WSGI based services. 
For each api you define, you must also define these flags: @@ -75,7 +75,7 @@ class NeutronApiService(WsgiService): """Class for neutron-api service.""" def __init__(self, app_name): profiler.setup('neutron-server', cfg.CONF.host) - super(NeutronApiService, self).__init__(app_name) + super().__init__(app_name) @classmethod def create(cls, app_name='neutron'): @@ -103,7 +103,7 @@ class RpcWorker(neutron_worker.NeutronBaseWorker): desc = 'rpc worker' def __init__(self, plugins, worker_process_count=1): - super(RpcWorker, self).__init__( + super().__init__( worker_process_count=worker_process_count, desc=self.desc, ) @@ -112,7 +112,7 @@ class RpcWorker(neutron_worker.NeutronBaseWorker): self._servers = [] def start(self): - super(RpcWorker, self).start(desc=self.desc) + super().start(desc=self.desc) for plugin in self._plugins: if hasattr(plugin, self.start_listeners_method): try: @@ -238,7 +238,7 @@ def _get_ovn_maintenance_worker(): class AllServicesNeutronWorker(neutron_worker.NeutronBaseWorker): def __init__(self, services, worker_process_count=1): - super(AllServicesNeutronWorker, self).__init__(worker_process_count) + super().__init__(worker_process_count) self._services = services for srv in self._services: self._check_base_worker_service(srv) @@ -251,7 +251,7 @@ class AllServicesNeutronWorker(neutron_worker.NeutronBaseWorker): # re-write the process title already defined and set by this class. srv.set_proctitle = 'off' self._launcher.launch_service(srv) - super(AllServicesNeutronWorker, self).start(desc="services worker") + super().start(desc="services worker") def stop(self): self._launcher.stop() @@ -400,11 +400,11 @@ class Service(n_rpc.Service): self.saved_args, self.saved_kwargs = args, kwargs self.timers = [] profiler.setup(binary, host) - super(Service, self).__init__(host, topic, manager=self.manager) + super().__init__(host, topic, manager=self.manager) def start(self): self.manager.init_host() - super(Service, self).start() + super().start() if self.report_interval: pulse = loopingcall.FixedIntervalLoopingCall(self.report_state) pulse.start(interval=self.report_interval, @@ -471,7 +471,7 @@ class Service(n_rpc.Service): self.stop() def stop(self): - super(Service, self).stop() + super().stop() for x in self.timers: try: x.stop() @@ -481,7 +481,7 @@ class Service(n_rpc.Service): self.manager.stop() def wait(self): - super(Service, self).wait() + super().wait() for x in self.timers: try: x.wait() diff --git a/neutron/services/auto_allocate/db.py b/neutron/services/auto_allocate/db.py index 1d6d77519e8..8cd946aced1 100644 --- a/neutron/services/auto_allocate/db.py +++ b/neutron/services/auto_allocate/db.py @@ -80,13 +80,13 @@ def _ensure_external_network_default_value_callback( @resource_extend.has_resource_extenders -class AutoAllocatedTopologyMixin(object): +class AutoAllocatedTopologyMixin: def __new__(cls, *args, **kwargs): # NOTE(kevinbenton): we subscribe on object construction because # the tests blow away the callback manager for each run - new = super(AutoAllocatedTopologyMixin, cls).__new__(cls, *args, - **kwargs) + new = super().__new__(cls, *args, + **kwargs) registry.subscribe(_ensure_external_network_default_value_callback, resources.NETWORK, events.PRECOMMIT_UPDATE) registry.subscribe(_ensure_external_network_default_value_callback, diff --git a/neutron/services/conntrack_helper/plugin.py b/neutron/services/conntrack_helper/plugin.py index 2c68c75eafd..d05d1b873df 100644 --- a/neutron/services/conntrack_helper/plugin.py +++ 
b/neutron/services/conntrack_helper/plugin.py @@ -57,7 +57,7 @@ class Plugin(l3_conntrack_helper.ConntrackHelperPluginBase): __filter_validation_support = True def __init__(self): - super(Plugin, self).__init__() + super().__init__() self.push_api = resources_rpc.ResourcesPushRpcApi() self.l3_plugin = directory.get_plugin(constants.L3) self.core_plugin = directory.get_plugin() diff --git a/neutron/services/externaldns/driver.py b/neutron/services/externaldns/driver.py index 3e627fd6078..dc5034fffe9 100644 --- a/neutron/services/externaldns/driver.py +++ b/neutron/services/externaldns/driver.py @@ -23,7 +23,7 @@ from neutron import manager LOG = log.getLogger(__name__) -class ExternalDNSService(object, metaclass=abc.ABCMeta): +class ExternalDNSService(metaclass=abc.ABCMeta): """Interface definition for an external dns service driver.""" def __init__(self): diff --git a/neutron/services/externaldns/drivers/designate/driver.py b/neutron/services/externaldns/drivers/designate/driver.py index e826c74cfc9..6b85e6dba29 100644 --- a/neutron/services/externaldns/drivers/designate/driver.py +++ b/neutron/services/externaldns/drivers/designate/driver.py @@ -76,7 +76,7 @@ class Designate(driver.ExternalDNSService): if not CONF.designate.allow_reverse_dns_lookup: return # Set up the PTR records - recordset_name = '%s.%s' % (dns_name, dns_domain) + recordset_name = '{}.{}'.format(dns_name, dns_domain) ptr_zone_email = 'admin@%s' % dns_domain[:-1] if CONF.designate.ptr_zone_email: ptr_zone_email = CONF.designate.ptr_zone_email @@ -139,12 +139,14 @@ class Designate(driver.ExternalDNSService): client, admin_client = get_clients(context) try: ids_to_delete = self._get_ids_ips_to_delete( - dns_domain, '%s.%s' % (dns_name, dns_domain), records, client) + dns_domain, '{}.{}'.format( + dns_name, dns_domain), records, client) except dns_exc.DNSDomainNotFound: # Try whether we have admin powers and can see all projects client = get_all_projects_client(context) ids_to_delete = self._get_ids_ips_to_delete( - dns_domain, '%s.%s' % (dns_name, dns_domain), records, client) + dns_domain, '{}.{}'.format( + dns_name, dns_domain), records, client) for _id in ids_to_delete: client.recordsets.delete(dns_domain, _id) diff --git a/neutron/services/l3_router/l3_router_plugin.py b/neutron/services/l3_router/l3_router_plugin.py index 1bd7bbfb73b..e5013d54f68 100644 --- a/neutron/services/l3_router/l3_router_plugin.py +++ b/neutron/services/l3_router/l3_router_plugin.py @@ -131,7 +131,7 @@ class L3RouterPlugin(service_base.ServicePluginBase, self.router_scheduler = importutils.import_object( cfg.CONF.router_scheduler_driver) self.add_periodic_l3_agent_status_check() - super(L3RouterPlugin, self).__init__() + super().__init__() if 'dvr' in self.supported_extension_aliases: l3_dvrscheduler_db.subscribe() if 'l3-ha' in self.supported_extension_aliases: @@ -188,7 +188,7 @@ class L3RouterPlugin(service_base.ServicePluginBase, leveraging the l3 agent, the initial status for the floating IP object will be DOWN. 
""" - return super(L3RouterPlugin, self).create_floatingip( + return super().create_floatingip( context, floatingip, initial_status=n_const.FLOATINGIP_STATUS_DOWN) diff --git a/neutron/services/l3_router/service_providers/base.py b/neutron/services/l3_router/service_providers/base.py index 519615dbf3a..6d1df94027e 100644 --- a/neutron/services/l3_router/service_providers/base.py +++ b/neutron/services/l3_router/service_providers/base.py @@ -15,7 +15,7 @@ from neutron._i18n import _ -class _FeatureFlag(object): +class _FeatureFlag: def is_compatible(self, value): if value == self.requires: @@ -37,7 +37,7 @@ OPTIONAL = _FeatureFlag(supports=True, requires=False) MANDATORY = _FeatureFlag(supports=True, requires=True) -class L3ServiceProvider(object): +class L3ServiceProvider: """Base class for L3 service provider drivers. On __init__ this will be given a handle to the l3 plugin. It is then the diff --git a/neutron/services/l3_router/service_providers/driver_controller.py b/neutron/services/l3_router/service_providers/driver_controller.py index 7dd59b18928..350a7b06e04 100644 --- a/neutron/services/l3_router/service_providers/driver_controller.py +++ b/neutron/services/l3_router/service_providers/driver_controller.py @@ -33,7 +33,7 @@ LOG = logging.getLogger(__name__) @registry.has_registry_receivers -class DriverController(object): +class DriverController: """Driver controller for the L3 service plugin. This component is responsible for dispatching router requests to L3 @@ -257,7 +257,7 @@ class _LegacyPlusProviderConfiguration( # loads up ha, dvr, and single_node service providers automatically. # If an operator has setup explicit values that conflict with these, # the operator defined values will take priority. - super(_LegacyPlusProviderConfiguration, self).__init__( + super().__init__( svc_type=plugin_constants.L3) for name, driver in (('dvrha', 'dvrha.DvrHaDriver'), ('dvr', 'dvr.DvrDriver'), ('ha', 'ha.HaDriver'), diff --git a/neutron/services/local_ip/local_ip_plugin.py b/neutron/services/local_ip/local_ip_plugin.py index 642be308b2a..3ae25009e07 100644 --- a/neutron/services/local_ip/local_ip_plugin.py +++ b/neutron/services/local_ip/local_ip_plugin.py @@ -30,7 +30,7 @@ class LocalIPPlugin(local_ip_db.LocalIPDbMixin): __filter_validation_support = True def __init__(self): - super(LocalIPPlugin, self).__init__() + super().__init__() self._resource_rpc = resources_rpc.ResourcesPushRpcApi() def create_local_ip_port_association(self, context, local_ip_id, @@ -42,7 +42,6 @@ class LocalIPPlugin(local_ip_db.LocalIPDbMixin): def delete_local_ip_port_association(self, context, fixed_port_id, local_ip_id): - lip_assoc = super( - LocalIPPlugin, self).delete_local_ip_port_association( + lip_assoc = super().delete_local_ip_port_association( context, fixed_port_id, local_ip_id) self._resource_rpc.push(context, [lip_assoc], rpc_events.DELETED) diff --git a/neutron/services/logapi/agent/l3/base.py b/neutron/services/logapi/agent/l3/base.py index a37eaa0d6ce..25ca9739293 100644 --- a/neutron/services/logapi/agent/l3/base.py +++ b/neutron/services/logapi/agent/l3/base.py @@ -29,7 +29,7 @@ LOG = logging.getLogger(__name__) log_cfg.register_log_driver_opts() -class L3LoggingExtensionBase(object): +class L3LoggingExtensionBase: """Base class for l3 logging extension like SNATLogExtension, FWaaSV2LogExtension diff --git a/neutron/services/logapi/agent/log_extension.py b/neutron/services/logapi/agent/log_extension.py index 757f65cfdea..16eeb8066c1 100644 --- a/neutron/services/logapi/agent/log_extension.py 
+++ b/neutron/services/logapi/agent/log_extension.py @@ -33,7 +33,7 @@ log_cfg.register_log_driver_opts() LOGGING_DRIVERS_NAMESPACE = 'neutron.services.logapi.drivers' -class LoggingDriver(object, metaclass=abc.ABCMeta): +class LoggingDriver(metaclass=abc.ABCMeta): """Defines abstract interface for logging driver""" # specific logging types are supported diff --git a/neutron/services/logapi/api_base.py b/neutron/services/logapi/api_base.py index 611e0a4a7c4..42cbbe8acd0 100644 --- a/neutron/services/logapi/api_base.py +++ b/neutron/services/logapi/api_base.py @@ -13,7 +13,7 @@ import abc -class LoggingApiBase(object, metaclass=abc.ABCMeta): +class LoggingApiBase(metaclass=abc.ABCMeta): """Logging API methods""" @abc.abstractmethod diff --git a/neutron/services/logapi/common/validators.py b/neutron/services/logapi/common/validators.py index 1ac4684db1a..deb30931f25 100644 --- a/neutron/services/logapi/common/validators.py +++ b/neutron/services/logapi/common/validators.py @@ -83,7 +83,7 @@ def validate_log_type_for_port(log_type, port): return False -class ResourceValidateRequest(object): +class ResourceValidateRequest: _instance = None diff --git a/neutron/services/logapi/drivers/manager.py b/neutron/services/logapi/drivers/manager.py index 69a2cd620eb..e08a6664ce3 100644 --- a/neutron/services/logapi/drivers/manager.py +++ b/neutron/services/logapi/drivers/manager.py @@ -44,11 +44,11 @@ def _get_param(args, kwargs, name, index): raise log_exc.LogapiDriverException(exception_msg=msg) -class ResourceCallBackBase(object): +class ResourceCallBackBase: def __new__(cls, *args, **kwargs): if not hasattr(cls, '_instance'): - cls._instance = super(ResourceCallBackBase, cls).__new__(cls) + cls._instance = super().__new__(cls) return cls._instance def __init__(self, resource, push_api): @@ -62,7 +62,7 @@ class ResourceCallBackBase(object): pass -class LoggingServiceDriverManager(object): +class LoggingServiceDriverManager: def __init__(self): self._drivers = set() diff --git a/neutron/services/logapi/drivers/openvswitch/ovs_firewall_log.py b/neutron/services/logapi/drivers/openvswitch/ovs_firewall_log.py index 55aa0c2b22e..6498c58d831 100644 --- a/neutron/services/logapi/drivers/openvswitch/ovs_firewall_log.py +++ b/neutron/services/logapi/drivers/openvswitch/ovs_firewall_log.py @@ -77,7 +77,7 @@ def find_deleted_sg_rules(old_port, new_ports): return del_rules -class Cookie(object): +class Cookie: def __init__(self, cookie_id, port, action, project): self.id = cookie_id @@ -105,7 +105,7 @@ class Cookie(object): return not self.log_object_refs -class OFPortLog(object): +class OFPortLog: def __init__(self, port, ovs_port, log_event): self.id = port['port_id'] @@ -261,7 +261,7 @@ class OVSFirewallLoggingDriver(log_ext.LoggingDriver): action, context, resource_type, log_resources=log_resources) def _handle_logging(self, action, context, resource_type, **kwargs): - handler_name = "%s_%s_log" % (action, resource_type) + handler_name = "{}_{}_log".format(action, resource_type) handler = getattr(self, handler_name) handler(context, **kwargs) @@ -352,7 +352,7 @@ class OVSFirewallLoggingDriver(log_ext.LoggingDriver): dl_type = kwargs.get('dl_type') ovsfw.create_reg_numbers(kwargs) if isinstance(dl_type, int): - kwargs['dl_type'] = "0x{:04x}".format(dl_type) + kwargs['dl_type'] = f"0x{dl_type:04x}" LOG.debug("Add flow firewall log %s", str(kwargs)) if self._deferred: self.int_br.add_flow(**kwargs) diff --git a/neutron/services/logapi/drivers/ovn/driver.py b/neutron/services/logapi/drivers/ovn/driver.py index 
0fa198b9c78..b697bbe051d 100644 --- a/neutron/services/logapi/drivers/ovn/driver.py +++ b/neutron/services/logapi/drivers/ovn/driver.py @@ -150,7 +150,7 @@ class OVNDriver(base.DriverBase): acl_changes += 1 msg = "Cleared %d, Not found %d (out of %d visited) ACLs" if log_name: - msg += " for network log {}".format(log_name) + msg += f" for network log {log_name}" LOG.info(msg, acl_changes, acl_absents, acl_visits) def _set_acls_log(self, pgs, context, ovn_txn, actions_enabled, log_name): @@ -158,8 +158,8 @@ class OVNDriver(base.DriverBase): for pg in pgs: meter_name = self.meter_name if pg["name"] != ovn_const.OVN_DROP_PORT_GROUP_NAME: - sg = sg_obj.SecurityGroup.get_sg_by_id(context, - pg["external_ids"][ovn_const.OVN_SG_EXT_ID_KEY]) + sg = sg_obj.SecurityGroup.get_sg_by_id( + context, pg["external_ids"][ovn_const.OVN_SG_EXT_ID_KEY]) if not sg: LOG.warning("Port Group %s is missing a corresponding " "security group, skipping its network log " @@ -331,9 +331,9 @@ class OVNDriver(base.DriverBase): acls_to_remove = [{"name": pgs[0]["name"], "acls": acls_to_check}] self._remove_acls_log(acls_to_remove, ovn_txn) else: - all_events = set([log.event for log in other_logs - if (not log.resource_id or - log.resource_id == log_obj.resource_id)]) + all_events = {log.event for log in other_logs + if (not log.resource_id or + log.resource_id == log_obj.resource_id)} if (log_const.ALL_EVENT not in all_events and log_obj.event not in all_events): self._remove_acls_log(pgs, ovn_txn) diff --git a/neutron/services/logapi/logging_plugin.py b/neutron/services/logapi/logging_plugin.py index 8e15ff343ed..c80404abc47 100644 --- a/neutron/services/logapi/logging_plugin.py +++ b/neutron/services/logapi/logging_plugin.py @@ -41,7 +41,7 @@ class LoggingPlugin(log_ext.LoggingPluginBase): __filter_validation_support = True def __init__(self): - super(LoggingPlugin, self).__init__() + super().__init__() self.driver_manager = driver_mgr.LoggingServiceDriverManager() self.validator_mgr = validators.ResourceValidateRequest.get_instance() diff --git a/neutron/services/logapi/rpc/agent.py b/neutron/services/logapi/rpc/agent.py index 0d1438a9e02..b5b92f40be0 100644 --- a/neutron/services/logapi/rpc/agent.py +++ b/neutron/services/logapi/rpc/agent.py @@ -19,7 +19,7 @@ from oslo_log import helpers as log_helpers import oslo_messaging -class LoggingApiStub(object): +class LoggingApiStub: """Stub proxy code for agent->server communication.""" def __init__(self): diff --git a/neutron/services/logapi/rpc/server.py b/neutron/services/logapi/rpc/server.py index eb6a4b428f9..0003abbb2cf 100644 --- a/neutron/services/logapi/rpc/server.py +++ b/neutron/services/logapi/rpc/server.py @@ -66,7 +66,7 @@ def get_sg_log_info_for_log_resources(context, log_resources): return db_api.get_sg_log_info_for_log_resources(context, log_resources) -class LoggingApiSkeleton(object): +class LoggingApiSkeleton: """Skeleton proxy code for agent->server communication.""" # History diff --git a/neutron/services/loki/loki_plugin.py b/neutron/services/loki/loki_plugin.py index b0357a13d41..356c70074b7 100644 --- a/neutron/services/loki/loki_plugin.py +++ b/neutron/services/loki/loki_plugin.py @@ -27,7 +27,7 @@ class LokiPlugin(service_base.ServicePluginBase): """Loki brings us the gift of sporadic database failures and delays.""" def __init__(self): - super(LokiPlugin, self).__init__() + super().__init__() db_api.sqla_listen(se.Session, 'before_flush', self.random_deadlock) db_api.sqla_listen(se.Session, 'loaded_as_persistent', self.random_delay) diff --git 
a/neutron/services/metering/agents/metering_agent.py b/neutron/services/metering/agents/metering_agent.py index 36600202e4c..3372ea950e7 100644 --- a/neutron/services/metering/agents/metering_agent.py +++ b/neutron/services/metering/agents/metering_agent.py @@ -41,14 +41,14 @@ from neutron.services.metering.drivers import utils as driverutils LOG = logging.getLogger(__name__) -class MeteringPluginRpc(object): +class MeteringPluginRpc: def __init__(self, host): # NOTE(yamamoto): super.__init__() call here is not only for # aesthetics. Because of multiple inheritances in MeteringAgent, # it's actually necessary to initialize parent classes of # manager.Manager correctly. - super(MeteringPluginRpc, self).__init__(host) + super().__init__(host) target = oslo_messaging.Target(topic=topics.METERING_PLUGIN, version='1.0') self.client = n_rpc.get_client(target) @@ -80,7 +80,7 @@ class MeteringAgent(MeteringPluginRpc, manager.Manager): self.routers = {} self.metering_infos = {} self.metering_labels = {} - super(MeteringAgent, self).__init__(host=host) + super().__init__(host=host) def _load_drivers(self): """Loads plugin-driver from configuration.""" @@ -347,8 +347,8 @@ class MeteringAgent(MeteringPluginRpc, manager.Manager): class MeteringAgentWithStateReport(MeteringAgent): def __init__(self, host, conf=None): - super(MeteringAgentWithStateReport, self).__init__(host=host, - conf=conf) + super().__init__(host=host, + conf=conf) self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS) self.failed_report_state = False self.agent_state = { diff --git a/neutron/services/metering/drivers/abstract_driver.py b/neutron/services/metering/drivers/abstract_driver.py index 48d3fed413a..5d5a4c0cfb4 100644 --- a/neutron/services/metering/drivers/abstract_driver.py +++ b/neutron/services/metering/drivers/abstract_driver.py @@ -21,7 +21,7 @@ BASE_PROJECT_TRAFFIC_COUNTER_KEY = "project-" BASE_LABEL_TRAFFIC_COUNTER_KEY = "label-" -class MeteringAbstractDriver(object, metaclass=abc.ABCMeta): +class MeteringAbstractDriver(metaclass=abc.ABCMeta): """Abstract Metering driver.""" def __init__(self, plugin, conf): diff --git a/neutron/services/metering/drivers/iptables/iptables_driver.py b/neutron/services/metering/drivers/iptables/iptables_driver.py index 08cecf43b37..b3012de1633 100644 --- a/neutron/services/metering/drivers/iptables/iptables_driver.py +++ b/neutron/services/metering/drivers/iptables/iptables_driver.py @@ -40,7 +40,7 @@ config.register_interface_driver_opts_helper(cfg.CONF) config.register_interface_opts() -class IptablesManagerTransaction(object): +class IptablesManagerTransaction: __transactions = {} def __init__(self, im): @@ -63,7 +63,7 @@ class IptablesManagerTransaction(object): self.__transactions[self.im] = transaction -class RouterWithMetering(object): +class RouterWithMetering: def __init__(self, conf, router): self.conf = conf @@ -122,7 +122,7 @@ class RouterWithMetering(object): class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): def __init__(self, plugin, conf): - super(IptablesMeteringDriver, self).__init__(plugin, conf) + super().__init__(plugin, conf) self.routers = {} self.driver = common_utils.load_interface_driver(self.conf) @@ -141,7 +141,7 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): @log_helpers.log_method_call def update_routers(self, context, routers): # disassociate removed routers - router_ids = set(router['id'] for router in routers) + router_ids = {router['id'] for router in routers} for router_id, rm in 
self.routers.items(): if router_id not in router_ids: self._process_disassociate_metering_label(rm.router) @@ -249,7 +249,7 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): if rule['excluded']: ipt_rule = '%s -j RETURN' % ipt_rule else: - ipt_rule = '%s -j %s' % (ipt_rule, label_chain) + ipt_rule = '{} -j {}'.format(ipt_rule, label_chain) return ipt_rule @staticmethod @@ -261,11 +261,12 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): source_ip_prefix = rule.get('source_ip_prefix') if source_ip_prefix: - iptables_rule = "-s %s %s" % (source_ip_prefix, iptables_rule) + iptables_rule = "-s {} {}".format(source_ip_prefix, iptables_rule) destination_ip_prefix = rule.get('destination_ip_prefix') if destination_ip_prefix: - iptables_rule = "-d %s %s" % (destination_ip_prefix, iptables_rule) + iptables_rule = "-d {} {}".format( + destination_ip_prefix, iptables_rule) return iptables_rule @@ -273,9 +274,9 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): def prepare_source_and_destination_rule_legacy(ext_dev, rule): remote_ip = rule['remote_ip_prefix'] if rule['direction'] == 'egress': - ipt_rule = '-s %s -o %s' % (remote_ip, ext_dev) + ipt_rule = '-s {} -o {}'.format(remote_ip, ext_dev) else: - ipt_rule = '-d %s -i %s' % (remote_ip, ext_dev) + ipt_rule = '-d {} -i {}'.format(remote_ip, ext_dev) return ipt_rule def _process_ns_specific_metering_label(self, router, ext_dev, im): @@ -576,9 +577,9 @@ class IptablesMeteringDriver(abstract_driver.MeteringAbstractDriver): label_traffic_counter_key = self.get_label_traffic_counter_key( label_id) - project_label_traffic_counter_key = "%s-%s" % ( + project_label_traffic_counter_key = "{}-{}".format( project_traffic_counter_key, label_traffic_counter_key) - router_label_traffic_counter_key = "%s-%s" % ( + router_label_traffic_counter_key = "{}-{}".format( router_traffic_counter_key, label_traffic_counter_key) chain_acc = self.retrieve_traffic_counters(label_id, rm, router, diff --git a/neutron/services/metering/metering_plugin.py b/neutron/services/metering/metering_plugin.py index 54ac1441e0d..b9dc908936f 100644 --- a/neutron/services/metering/metering_plugin.py +++ b/neutron/services/metering/metering_plugin.py @@ -42,7 +42,7 @@ class MeteringPlugin(metering_db.MeteringDbMixin): __filter_validation_support = True def __init__(self): - super(MeteringPlugin, self).__init__() + super().__init__() self.meter_rpc = metering_rpc_agent_api.MeteringAgentNotifyAPI() rpc_worker = service.RpcWorker([self], worker_process_count=0) @@ -57,7 +57,7 @@ class MeteringPlugin(metering_db.MeteringDbMixin): return self.conn.consume_in_threads() def create_metering_label(self, context, metering_label): - label = super(MeteringPlugin, self).create_metering_label( + label = super().create_metering_label( context, metering_label) data = self.get_sync_data_metering(context) @@ -67,7 +67,7 @@ class MeteringPlugin(metering_db.MeteringDbMixin): def delete_metering_label(self, context, label_id): data = self.get_sync_data_metering(context, label_id) - super(MeteringPlugin, self).delete_metering_label( + super().delete_metering_label( context, label_id) self.meter_rpc.remove_metering_label(context, data) @@ -77,7 +77,7 @@ class MeteringPlugin(metering_db.MeteringDbMixin): MeteringPlugin.validate_metering_label_rule(metering_label_rule) self.check_for_rule_overlaps(context, metering_label_rule) - rule = super(MeteringPlugin, self).create_metering_label_rule( + rule = super().create_metering_label_rule( context, 
metering_label_rule) if rule.get("remote_ip_prefix"): @@ -168,7 +168,7 @@ class MeteringPlugin(metering_db.MeteringDbMixin): remote_ip_prefix=remote_ip_prefix) def delete_metering_label_rule(self, context, rule_id): - rule = super(MeteringPlugin, self).delete_metering_label_rule( + rule = super().delete_metering_label_rule( context, rule_id) data = self.get_sync_data_for_rule(context, rule) diff --git a/neutron/services/ndp_proxy/plugin.py b/neutron/services/ndp_proxy/plugin.py index 64dea37388c..d4a739261c1 100644 --- a/neutron/services/ndp_proxy/plugin.py +++ b/neutron/services/ndp_proxy/plugin.py @@ -61,7 +61,7 @@ class NDPProxyPlugin(l3_ndp_proxy.NDPProxyBase): __filter_validation_support = True def __init__(self): - super(NDPProxyPlugin, self).__init__() + super().__init__() self.push_api = resources_rpc.ResourcesPushRpcApi() self.l3_plugin = directory.get_plugin(constants.L3) self.core_plugin = directory.get_plugin() @@ -117,8 +117,8 @@ class NDPProxyPlugin(l3_ndp_proxy.NDPProxyBase): (f.get('ip_address') and netaddr.IPNetwork(f['ip_address']).version == V6)]: return - subnet_ids = set(f['subnet_id'] for f in ext_ips - if f.get('subnet_id')) + subnet_ids = {f['subnet_id'] for f in ext_ips + if f.get('subnet_id')} for subnet_id in subnet_ids: if self.core_plugin.get_subnet( context, subnet_id)['ip_version'] == V6: diff --git a/neutron/services/network_ip_availability/plugin.py b/neutron/services/network_ip_availability/plugin.py index 25a2244873a..3e24fd8580a 100644 --- a/neutron/services/network_ip_availability/plugin.py +++ b/neutron/services/network_ip_availability/plugin.py @@ -46,9 +46,8 @@ class NetworkIPAvailabilityPlugin(ip_availability_db.IpAvailabilityMixin, def get_network_ip_availabilities(self, context, filters=None, fields=None): """Returns ip availability data for a collection of networks.""" - net_ip_availabilities = super( - NetworkIPAvailabilityPlugin, self - ).get_network_ip_availabilities(context, filters) + net_ip_availabilities = super().get_network_ip_availabilities( + context, filters) return [db_utils.resource_fields(net_ip_availability, fields) for net_ip_availability in net_ip_availabilities] diff --git a/neutron/services/network_segment_range/plugin.py b/neutron/services/network_segment_range/plugin.py index a5c51310f49..a02b35b959f 100644 --- a/neutron/services/network_segment_range/plugin.py +++ b/neutron/services/network_segment_range/plugin.py @@ -64,7 +64,7 @@ class NetworkSegmentRangePlugin(ext_range.NetworkSegmentRangePluginBase): __filter_validation_support = True def __init__(self): - super(NetworkSegmentRangePlugin, self).__init__() + super().__init__() self.type_manager = directory.get_plugin().type_manager self.type_manager.initialize_network_segment_range_support() diff --git a/neutron/services/ovn_l3/plugin.py b/neutron/services/ovn_l3/plugin.py index bbb970f138b..c1fd1382598 100644 --- a/neutron/services/ovn_l3/plugin.py +++ b/neutron/services/ovn_l3/plugin.py @@ -86,7 +86,7 @@ class OVNL3RouterPlugin(service_base.ServicePluginBase, floatingip=l3_models.FloatingIP) def __init__(self): LOG.info("Starting OVNL3RouterPlugin") - super(OVNL3RouterPlugin, self).__init__() + super().__init__() self._plugin_property = None self._mech = None self._initialize_plugin_driver() @@ -165,7 +165,7 @@ class OVNL3RouterPlugin(service_base.ServicePluginBase, interface_info): try: router_interface_info = ( - super(OVNL3RouterPlugin, self).add_router_interface( + super().add_router_interface( context, router_id, interface_info)) except n_exc.PortInUse: # 
NOTE(lucasagomes): If the port is already being used it means @@ -191,7 +191,7 @@ class OVNL3RouterPlugin(service_base.ServicePluginBase, # this method to create the floating IP in the DB with status down, # while the flavor drivers are responsible for calling the correct # backend to instatiate the floating IP in the data plane - return super(OVNL3RouterPlugin, self).create_floatingip( + return super().create_floatingip( context, floatingip, initial_status) def update_floatingip_status(self, context, floatingip_id, status): @@ -207,7 +207,7 @@ class OVNL3RouterPlugin(service_base.ServicePluginBase, @db_api.retry_if_session_inactive() def update_floatingip_status_retry(self, context, floatingip_id, status): with db_api.CONTEXT_WRITER.using(context): - return super(OVNL3RouterPlugin, self).update_floatingip_status( + return super().update_floatingip_status( context, floatingip_id, status) def _get_gateway_port_physnet_mapping(self): diff --git a/neutron/services/ovn_l3/service_providers/driver_controller.py b/neutron/services/ovn_l3/service_providers/driver_controller.py index aec34d389ab..afc7d0abc04 100644 --- a/neutron/services/ovn_l3/service_providers/driver_controller.py +++ b/neutron/services/ovn_l3/service_providers/driver_controller.py @@ -54,7 +54,7 @@ class _OvnPlusProviderConfiguration( def __init__(self): # loads up the OVN provider automatically and sets it as default. - super(_OvnPlusProviderConfiguration, self).__init__( + super().__init__( svc_type=plugin_constants.L3) path = 'neutron.services.ovn_l3.service_providers.ovn.OvnDriver' try: diff --git a/neutron/services/ovn_l3/service_providers/user_defined.py b/neutron/services/ovn_l3/service_providers/user_defined.py index b57b6f559bc..748cc11359c 100644 --- a/neutron/services/ovn_l3/service_providers/user_defined.py +++ b/neutron/services/ovn_l3/service_providers/user_defined.py @@ -39,7 +39,7 @@ class UserDefined(base.L3ServiceProvider): ha_support = base.OPTIONAL def __init__(self, l3_plugin): - super(UserDefined, self).__init__(l3_plugin) + super().__init__(l3_plugin) self._user_defined_provider = __name__ + "." + self.__class__.__name__ @property diff --git a/neutron/services/placement_report/plugin.py b/neutron/services/placement_report/plugin.py index a00673adff4..1b6d2771476 100644 --- a/neutron/services/placement_report/plugin.py +++ b/neutron/services/placement_report/plugin.py @@ -248,7 +248,7 @@ class PlacementReportPlugin(service_base.ServicePluginBase): agent['agent_type'], agent['host']) -class PlacementReporterAgents(object): +class PlacementReporterAgents: # Yep, this is meant to depend on ML2. 
def __init__(self, ml2_plugin): diff --git a/neutron/services/portforwarding/drivers/ovn/driver.py b/neutron/services/portforwarding/drivers/ovn/driver.py index 02c4cca5cb9..47f440ace8a 100644 --- a/neutron/services/portforwarding/drivers/ovn/driver.py +++ b/neutron/services/portforwarding/drivers/ovn/driver.py @@ -33,7 +33,7 @@ from neutron.services.portforwarding import constants as pf_const LOG = log.getLogger(__name__) -class OVNPortForwardingHandler(object): +class OVNPortForwardingHandler: @staticmethod def _get_lb_protocol(pf_obj): return pf_const.LB_PROTOCOL_MAP[pf_obj.protocol] @@ -56,10 +56,10 @@ class OVNPortForwardingHandler(object): lb_name = cls.lb_name(pf_obj.floatingip_id, cls._get_lb_protocol(pf_obj), external_port) - vip = "{}:{}".format(pf_obj.floating_ip_address, pf_obj.external_port) + vip = f"{pf_obj.floating_ip_address}:{pf_obj.external_port}" internal_ip = "{}:{}".format(pf_obj.internal_ip_address, pf_obj.internal_port) - rtr_name = 'neutron-{}'.format(pf_obj.router_id) + rtr_name = f'neutron-{pf_obj.router_id}' return lb_name, vip, [internal_ip], rtr_name def _get_lbs_and_ls(self, nb_ovn, payload): @@ -134,7 +134,7 @@ class OVNPortForwardingHandler(object): def _validate_router_networks(self, nb_ovn, router_id): if not ovn_conf.is_ovn_distributed_floating_ip(): return - rtr_name = 'neutron-{}'.format(router_id) + rtr_name = f'neutron-{router_id}' ovn_lr = nb_ovn.get_lrouter(rtr_name) if not ovn_lr: return @@ -224,7 +224,7 @@ class OVNPortForwardingHandler(object): @registry.has_registry_receivers -class OVNPortForwarding(object): +class OVNPortForwarding: def __init__(self, l3_plugin): self._validate_configuration() diff --git a/neutron/services/portforwarding/pf_plugin.py b/neutron/services/portforwarding/pf_plugin.py index 0b3ef3dab01..76d1b32e387 100644 --- a/neutron/services/portforwarding/pf_plugin.py +++ b/neutron/services/portforwarding/pf_plugin.py @@ -99,7 +99,7 @@ class PortForwardingPlugin(fip_pf.PortForwardingPluginBase): __filter_validation_support = True def __init__(self): - super(PortForwardingPlugin, self).__init__() + super().__init__() self.push_api = resources_rpc.ResourcesPushRpcApi() \ if self._rpc_notifications_required else None self.l3_plugin = directory.get_plugin(constants.L3) @@ -493,13 +493,13 @@ class PortForwardingPlugin(fip_pf.PortForwardingPluginBase): protocol=None, internal_ip_address=None): external_range_pf_dict = pf_dict.get('external_port_range') if not external_range_pf_dict and 'external_port' in pf_dict: - external_range_pf_dict = '%(port)s:%(port)s' % { - 'port': pf_dict.get('external_port')} + external_range_pf_dict = '{port}:{port}'.format( + port=pf_dict.get('external_port')) internal_range_pf_dict = pf_dict.get('internal_port_range') if not internal_range_pf_dict and 'internal_port' in pf_dict: - internal_range_pf_dict = '%(port)s:%(port)s' % { - 'port': pf_dict.get('internal_port')} + internal_range_pf_dict = '{port}:{port}'.format( + port=pf_dict.get('internal_port')) internal_port_id = pf_dict.get('internal_port_id') or internal_port_id protocol = pf_dict.get('protocol') or protocol diff --git a/neutron/services/provider_configuration.py b/neutron/services/provider_configuration.py index af26a252007..5da389e3fd2 100644 --- a/neutron/services/provider_configuration.py +++ b/neutron/services/provider_configuration.py @@ -38,7 +38,7 @@ serviceprovider_opts = prov_config.serviceprovider_opts prov_config.register_service_provider_opts() -class NeutronModule(object): +class NeutronModule: """A Neutron extension module.""" 
def __init__(self, service_module): @@ -146,8 +146,8 @@ def get_provider_driver_class(driver, namespace=SERVICE_PROVIDERS): return driver except RuntimeError: return driver - new_driver = "%s.%s" % (driver_manager.__module__, - driver_manager.__name__) + new_driver = "{}.{}".format(driver_manager.__module__, + driver_manager.__name__) LOG.warning( "The configured driver %(driver)s has been moved, automatically " "using %(new_driver)s instead. Please update your config files, " @@ -215,7 +215,7 @@ class ServiceProviderAlreadyAssociated(n_exc.Conflict): "provider '%(provider)s' for service type '%(service_type)s'") -class ProviderConfiguration(object): +class ProviderConfiguration: def __init__(self, svc_module='neutron', svc_type=None): self.providers = {} diff --git a/neutron/services/qos/drivers/manager.py b/neutron/services/qos/drivers/manager.py index 248bb22ab77..15fc3bf144e 100644 --- a/neutron/services/qos/drivers/manager.py +++ b/neutron/services/qos/drivers/manager.py @@ -35,7 +35,7 @@ SKIPPED_VIF_TYPES = [ ] -class QosServiceDriverManager(object): +class QosServiceDriverManager: def __init__(self): self._drivers = [] diff --git a/neutron/services/qos/qos_plugin.py b/neutron/services/qos/qos_plugin.py index 659a133fe61..ce95376174d 100644 --- a/neutron/services/qos/qos_plugin.py +++ b/neutron/services/qos/qos_plugin.py @@ -94,7 +94,7 @@ class QoSPlugin(qos.QoSPluginBase): __filter_validation_support = True def __init__(self): - super(QoSPlugin, self).__init__() + super().__init__() self.driver_manager = manager.QosServiceDriverManager() self._placement_client = pl_client.PlacementAPIClient(cfg.CONF) @@ -522,8 +522,8 @@ class QoSPlugin(qos.QoSPluginBase): if not original_rules and not desired_rules: return - o_rule_types = set(r.rule_type for r in original_rules) - d_rule_types = set(r.rule_type for r in desired_rules) + o_rule_types = {r.rule_type for r in original_rules} + d_rule_types = {r.rule_type for r in desired_rules} allocation = orig_port['binding:profile'].get('allocation') if (not original_rules and desired_rules) or not allocation: LOG.warning("There was no QoS policy with minimum_bandwidth or " diff --git a/neutron/services/revisions/revision_plugin.py b/neutron/services/revisions/revision_plugin.py index 28e5e91c904..d73319a1feb 100644 --- a/neutron/services/revisions/revision_plugin.py +++ b/neutron/services/revisions/revision_plugin.py @@ -37,7 +37,7 @@ class RevisionPlugin(service_base.ServicePluginBase): __filter_validation_support = True def __init__(self): - super(RevisionPlugin, self).__init__() + super().__init__() # background on these event hooks: # https://docs.sqlalchemy.org/en/latest/orm/session_events.html db_api.sqla_listen(se.Session, 'before_flush', self.bump_revisions) @@ -328,4 +328,4 @@ class RevisionNumberConstraintFailed(webob.exc.HTTPPreconditionFailed): def __init__(self, expected, current): detail = (_("Constrained to %(exp)s, but current revision is %(cur)s") % {'exp': expected, 'cur': current}) - super(RevisionNumberConstraintFailed, self).__init__(detail=detail) + super().__init__(detail=detail) diff --git a/neutron/services/segments/db.py b/neutron/services/segments/db.py index 0cdf31ba0de..08fa3ab2acc 100644 --- a/neutron/services/segments/db.py +++ b/neutron/services/segments/db.py @@ -62,7 +62,7 @@ def check_user_configured_segment_plugin(): return _USER_CONFIGURED_SEGMENT_PLUGIN -class SegmentDbMixin(object): +class SegmentDbMixin: """Mixin class to add segment.""" @staticmethod diff --git a/neutron/services/segments/plugin.py 
b/neutron/services/segments/plugin.py index e6f303cdda7..76c571b70da 100644 --- a/neutron/services/segments/plugin.py +++ b/neutron/services/segments/plugin.py @@ -161,7 +161,7 @@ class Plugin(db.SegmentDbMixin, segment.SegmentPluginBase): raise n_exc.SubnetInUse(subnet_id=subnet.id) -class Event(object): +class Event: def __init__(self, method, segment_ids, total=None, reserved=None, segment_host_mappings=None, host=None): @@ -177,7 +177,7 @@ class Event(object): @registry.has_registry_receivers -class NovaSegmentNotifier(object): +class NovaSegmentNotifier: def __init__(self): self.p_client, self.n_client = self._get_clients() @@ -498,7 +498,7 @@ class NovaSegmentNotifier(object): @registry.has_registry_receivers -class SegmentHostRoutes(object): +class SegmentHostRoutes: def _get_subnets(self, context, network_id): return subnet_obj.Subnet.get_objects(context, network_id=network_id) @@ -574,10 +574,10 @@ class SegmentHostRoutes(object): routed network :returns True if host_routes and calc_host_routes are not equal """ - return ((set((route['destination'], - route['nexthop']) for route in host_routes) != - set((route['destination'], - route['nexthop']) for route in calc_host_routes))) + return ({(route['destination'], + route['nexthop']) for route in host_routes} != + {(route['destination'], + route['nexthop']) for route in calc_host_routes}) def _update_routed_network_host_routes(self, context, network_id, deleted_cidr=None): diff --git a/neutron/services/tag/tag_plugin.py b/neutron/services/tag/tag_plugin.py index 17d50469b68..9c10bfa4124 100644 --- a/neutron/services/tag/tag_plugin.py +++ b/neutron/services/tag/tag_plugin.py @@ -41,7 +41,7 @@ class TagPlugin(tagging.TagPluginBase): __filter_validation_support = True def __new__(cls, *args, **kwargs): - inst = super(TagPlugin, cls).__new__(cls, *args, **kwargs) + inst = super().__new__(cls, *args, **kwargs) tag_obj.register_tag_hooks() return inst diff --git a/neutron/services/timestamp/timestamp_db.py b/neutron/services/timestamp/timestamp_db.py index 872378ba196..43978c64fef 100644 --- a/neutron/services/timestamp/timestamp_db.py +++ b/neutron/services/timestamp/timestamp_db.py @@ -78,7 +78,7 @@ def _add_timestamp(mapper, _conn, target): @resource_extend.has_resource_extenders -class TimeStamp_db_mixin(object): +class TimeStamp_db_mixin: """Mixin class to add Time Stamp methods.""" def __new__(cls, *args, **kwargs): @@ -90,7 +90,7 @@ class TimeStamp_db_mixin(object): query_hook=None, filter_hook=None, result_filters=_change_since_result_filter_hook) - return super(TimeStamp_db_mixin, cls).__new__(cls, *args, **kwargs) + return super().__new__(cls, *args, **kwargs) def register_db_events(self): listen = db_api.sqla_listen diff --git a/neutron/services/timestamp/timestamp_plugin.py b/neutron/services/timestamp/timestamp_plugin.py index bad75db929f..c81145356ef 100644 --- a/neutron/services/timestamp/timestamp_plugin.py +++ b/neutron/services/timestamp/timestamp_plugin.py @@ -28,7 +28,7 @@ class TimeStampPlugin(service_base.ServicePluginBase, __filter_validation_support = True def __init__(self): - super(TimeStampPlugin, self).__init__() + super().__init__() self.register_db_events() # TODO(jlibosva): Move this to register_model_query_hook base_obj.register_filter_hook_on_model( diff --git a/neutron/services/trunk/drivers/base.py b/neutron/services/trunk/drivers/base.py index 25793f4c08c..45d7c6ef864 100644 --- a/neutron/services/trunk/drivers/base.py +++ b/neutron/services/trunk/drivers/base.py @@ -21,7 +21,7 @@ from 
neutron_lib.callbacks import resources @registry.has_registry_receivers -class DriverBase(object): +class DriverBase: def __init__(self, name, interfaces, segmentation_types, agent_type=None, can_trunk_bound_port=False): diff --git a/neutron/services/trunk/drivers/linuxbridge/agent/driver.py b/neutron/services/trunk/drivers/linuxbridge/agent/driver.py index 3caa4b63d55..784e7b114a1 100644 --- a/neutron/services/trunk/drivers/linuxbridge/agent/driver.py +++ b/neutron/services/trunk/drivers/linuxbridge/agent/driver.py @@ -46,7 +46,7 @@ class LinuxBridgeTrunkDriver(trunk_rpc.TrunkSkeleton): def __init__(self, plumber=None, trunk_api=None): self._plumber = plumber or trunk_plumber.Plumber() self._tapi = trunk_api or _TrunkAPI(trunk_rpc.TrunkStub()) - super(LinuxBridgeTrunkDriver, self).__init__() + super().__init__() def handle_trunks(self, context, resource_type, trunks, event_type): """Trunk data model change from the server.""" @@ -118,7 +118,7 @@ class LinuxBridgeTrunkDriver(trunk_rpc.TrunkSkeleton): t_const.TRUNK_DEGRADED_STATUS) -class _TrunkAPI(object): +class _TrunkAPI: """Our secret stash of trunks stored by port ID. Tell no one.""" def __init__(self, trunk_stub): diff --git a/neutron/services/trunk/drivers/linuxbridge/agent/trunk_plumber.py b/neutron/services/trunk/drivers/linuxbridge/agent/trunk_plumber.py index b6f77ff2491..31b9ad4f09b 100644 --- a/neutron/services/trunk/drivers/linuxbridge/agent/trunk_plumber.py +++ b/neutron/services/trunk/drivers/linuxbridge/agent/trunk_plumber.py @@ -22,7 +22,7 @@ from neutron.plugins.ml2.drivers.linuxbridge.agent.common import utils as lutil LOG = logging.getLogger(__name__) -class Plumber(object): +class Plumber: """Object responsible for VLAN interface CRUD. This handles the creation/deletion/listing of VLAN interfaces for diff --git a/neutron/services/trunk/drivers/openvswitch/agent/driver.py b/neutron/services/trunk/drivers/openvswitch/agent/driver.py index 68e43422090..3b027c8a901 100644 --- a/neutron/services/trunk/drivers/openvswitch/agent/driver.py +++ b/neutron/services/trunk/drivers/openvswitch/agent/driver.py @@ -36,7 +36,7 @@ class OVSTrunkSkeleton(agent.TrunkSkeleton): """ def __init__(self, ovsdb_handler): - super(OVSTrunkSkeleton, self).__init__() + super().__init__() self.ovsdb_handler = ovsdb_handler registry.unsubscribe(self.handle_trunks, resources.TRUNK) diff --git a/neutron/services/trunk/drivers/openvswitch/agent/ovsdb_handler.py b/neutron/services/trunk/drivers/openvswitch/agent/ovsdb_handler.py index 1f931af557c..22fb1b572bf 100644 --- a/neutron/services/trunk/drivers/openvswitch/agent/ovsdb_handler.py +++ b/neutron/services/trunk/drivers/openvswitch/agent/ovsdb_handler.py @@ -112,7 +112,7 @@ def bridge_has_service_port(bridge): @registry.has_registry_receivers -class OVSDBHandler(object): +class OVSDBHandler: """It listens to OVSDB events to create the physical resources associated to a logical trunk in response to OVSDB events (such as VM boot and/or delete). diff --git a/neutron/services/trunk/drivers/openvswitch/agent/trunk_manager.py b/neutron/services/trunk/drivers/openvswitch/agent/trunk_manager.py index bb9559b739f..07c31ba9e64 100644 --- a/neutron/services/trunk/drivers/openvswitch/agent/trunk_manager.py +++ b/neutron/services/trunk/drivers/openvswitch/agent/trunk_manager.py @@ -31,7 +31,7 @@ def get_br_int_port_name(prefix, port_id): The port name is the one that plumbs into the integration bridge. 
""" - return ("%si-%s" % (prefix, port_id))[:constants.DEVICE_NAME_MAX_LEN] + return ("{}i-{}".format(prefix, port_id))[:constants.DEVICE_NAME_MAX_LEN] def get_br_trunk_port_name(prefix, port_id): @@ -39,7 +39,7 @@ def get_br_trunk_port_name(prefix, port_id): The port name is the one that plumbs into the trunk bridge. """ - return ("%st-%s" % (prefix, port_id))[:constants.DEVICE_NAME_MAX_LEN] + return ("{}t-{}".format(prefix, port_id))[:constants.DEVICE_NAME_MAX_LEN] def get_patch_peer_attrs(peer_name, port_mac=None, port_id=None): @@ -63,13 +63,13 @@ class TrunkBridge(ovs_lib.OVSBridge): """ def __init__(self, trunk_id): name = utils.gen_trunk_br_name(trunk_id) - super(TrunkBridge, self).__init__(name) + super().__init__(name) def exists(self): return self.bridge_exists(self.br_name) -class TrunkParentPort(object): +class TrunkParentPort: """An OVS trunk parent port. A trunk parent port is represented in OVS with two patch ports that @@ -155,7 +155,7 @@ class SubPort(TrunkParentPort): DEV_PREFIX = 'sp' def __init__(self, trunk_id, port_id, port_mac=None, segmentation_id=None): - super(SubPort, self).__init__(trunk_id, port_id, port_mac) + super().__init__(trunk_id, port_id, port_mac) self.segmentation_id = segmentation_id def plug(self, br_int): @@ -170,7 +170,7 @@ class SubPort(TrunkParentPort): :param br_int: an integration bridge where peer endpoint of patch port will be created. """ - super(SubPort, self).plug(br_int, tag=self.segmentation_id) + super().plug(br_int, tag=self.segmentation_id) def unplug(self, bridge): """Unplug the sub port from the bridge. @@ -190,7 +190,7 @@ class SubPort(TrunkParentPort): bridge.br_name)) -class TrunkManager(object): +class TrunkManager: """It implements the OVS trunk dataplane. It interfaces with the OVSDB server to execute OVS commands. diff --git a/neutron/services/trunk/drivers/ovn/trunk_driver.py b/neutron/services/trunk/drivers/ovn/trunk_driver.py index f57e45a858d..2195c70e190 100644 --- a/neutron/services/trunk/drivers/ovn/trunk_driver.py +++ b/neutron/services/trunk/drivers/ovn/trunk_driver.py @@ -40,7 +40,7 @@ SUPPORTED_SEGMENTATION_TYPES = ( LOG = log.getLogger(__name__) -class OVNTrunkHandler(object): +class OVNTrunkHandler: def __init__(self, plugin_driver): self.plugin_driver = plugin_driver @@ -204,7 +204,7 @@ class OVNTrunkDriver(trunk_base.DriverBase): @registry.receives(resources.TRUNK_PLUGIN, [events.AFTER_INIT]) def register(self, resource, event, trigger, payload=None): - super(OVNTrunkDriver, self).register( + super().register( resource, event, trigger, payload=payload) self._handler = OVNTrunkHandler(self.plugin_driver) diff --git a/neutron/services/trunk/rpc/agent.py b/neutron/services/trunk/rpc/agent.py index d12258038fb..6ffdc21f41e 100644 --- a/neutron/services/trunk/rpc/agent.py +++ b/neutron/services/trunk/rpc/agent.py @@ -38,7 +38,7 @@ from neutron.services.trunk.rpc import constants as trunk_consts # For server-side stub and skeleton proxy code, please look at server.py -class TrunkSkeleton(object): +class TrunkSkeleton: """Skeleton proxy code for server->agent communication.""" def __init__(self): @@ -75,7 +75,7 @@ class TrunkSkeleton(object): # manage, the notification should be ignored. 
-class TrunkStub(object): +class TrunkStub: """Stub proxy code for agent->server communication.""" # API HISTORY # 1.0 - initial version diff --git a/neutron/services/trunk/rpc/backend.py b/neutron/services/trunk/rpc/backend.py index 18d91edabc7..23846c13cdc 100644 --- a/neutron/services/trunk/rpc/backend.py +++ b/neutron/services/trunk/rpc/backend.py @@ -23,7 +23,7 @@ LOG = logging.getLogger(__name__) @registry.has_registry_receivers -class ServerSideRpcBackend(object): +class ServerSideRpcBackend: """The Neutron Server RPC backend.""" def __init__(self): diff --git a/neutron/services/trunk/rpc/server.py b/neutron/services/trunk/rpc/server.py index be5fd86a896..d9749b259e6 100644 --- a/neutron/services/trunk/rpc/server.py +++ b/neutron/services/trunk/rpc/server.py @@ -55,7 +55,7 @@ def trunk_by_port_provider(resource, port_id, context, **kwargs): return trunk_objects.Trunk.get_object(context, port_id=port_id) -class TrunkSkeleton(object): +class TrunkSkeleton: """Skeleton proxy code for agent->server communication.""" # API version history: @@ -191,7 +191,7 @@ class TrunkSkeleton(object): return port -class TrunkStub(object): +class TrunkStub: """Stub proxy code for server->agent communication.""" def __init__(self): diff --git a/neutron/services/trunk/rules.py b/neutron/services/trunk/rules.py index 9d9587f8aff..7b5e7d7de55 100644 --- a/neutron/services/trunk/rules.py +++ b/neutron/services/trunk/rules.py @@ -58,7 +58,7 @@ def enforce_port_deletion_rules(resource, event, trigger, payload=None): trunk_id=trunk_obj.id) -class TrunkPortValidator(object): +class TrunkPortValidator: def __init__(self, port_id): self.port_id = port_id @@ -150,7 +150,7 @@ class TrunkPortValidator(object): device_id=self._port['device_id']) -class SubPortsValidator(object): +class SubPortsValidator: def __init__(self, segmentation_types, subports, trunk_port_id=None): self._segmentation_types = segmentation_types diff --git a/neutron/tests/unit/agent/linux/test_daemon.py b/neutron/tests/unit/agent/linux/test_daemon.py index 5e875084572..8ea5a5f4d89 100644 --- a/neutron/tests/unit/agent/linux/test_daemon.py +++ b/neutron/tests/unit/agent/linux/test_daemon.py @@ -202,7 +202,7 @@ class TestPidfile(base.BaseTestCase): read.return_value = 34 self.assertTrue(p.is_running()) - mock_open.assert_called_once_with('/proc/34/cmdline', 'r') + mock_open.assert_called_once_with('/proc/34/cmdline') def test_is_running_uuid_true(self): mock_open = self.useFixture( @@ -215,7 +215,7 @@ class TestPidfile(base.BaseTestCase): read.return_value = 34 self.assertTrue(p.is_running()) - mock_open.assert_called_once_with('/proc/34/cmdline', 'r') + mock_open.assert_called_once_with('/proc/34/cmdline') def test_is_running_uuid_false(self): mock_open = self.useFixture( @@ -228,7 +228,7 @@ class TestPidfile(base.BaseTestCase): read.return_value = 34 self.assertFalse(p.is_running()) - mock_open.assert_called_once_with('/proc/34/cmdline', 'r') + mock_open.assert_called_once_with('/proc/34/cmdline') class TestDaemon(base.BaseTestCase): diff --git a/neutron/tests/unit/agent/linux/test_utils.py b/neutron/tests/unit/agent/linux/test_utils.py index 4e01bb907cd..a0ba5c392bc 100644 --- a/neutron/tests/unit/agent/linux/test_utils.py +++ b/neutron/tests/unit/agent/linux/test_utils.py @@ -274,7 +274,7 @@ class TestGetCmdlineFromPid(base.BaseTestCase): lib_fixtures.OpenFixture('/proc/%s/cmdline' % self.pid, process) ).mock_open cmdline = utils.get_cmdline_from_pid(self.pid) - mock_open.assert_called_once_with('/proc/%s/cmdline' % self.pid, 'r') + 
mock_open.assert_called_once_with('/proc/%s/cmdline' % self.pid) self.assertEqual(expected_cmd, cmdline) def test_cmdline_separated_with_null_char(self): @@ -308,7 +308,7 @@ class TestGetCmdlineFromPid(base.BaseTestCase): ).mock_open mock_open.side_effect = IOError() cmdline = utils.get_cmdline_from_pid(self.pid) - mock_open.assert_called_once_with('/proc/%s/cmdline' % self.pid, 'r') + mock_open.assert_called_once_with('/proc/%s/cmdline' % self.pid) self.assertEqual([], cmdline) diff --git a/neutron/worker.py b/neutron/worker.py index 44d99ec71cb..7aef6c21c71 100644 --- a/neutron/worker.py +++ b/neutron/worker.py @@ -20,21 +20,21 @@ class NeutronBaseWorker(worker.BaseWorker): def __init__(self, worker_process_count=1, set_proctitle=None, desc=None): set_proctitle = set_proctitle or cfg.CONF.setproctitle - super(NeutronBaseWorker, self).__init__( + super().__init__( worker_process_count=worker_process_count, set_proctitle=set_proctitle, desc=desc, ) def start(self, name="neutron-server", desc=None): - super(NeutronBaseWorker, self).start(name=name, desc=desc) + super().start(name=name, desc=desc) class PeriodicWorker(NeutronBaseWorker): """A worker that runs a function at a fixed interval.""" def __init__(self, check_func, interval, initial_delay): - super(PeriodicWorker, self).__init__(worker_process_count=0) + super().__init__(worker_process_count=0) self._check_func = check_func self._loop = None @@ -42,7 +42,7 @@ class PeriodicWorker(NeutronBaseWorker): self._initial_delay = initial_delay def start(self): - super(PeriodicWorker, self).start(desc="periodic worker") + super().start(desc="periodic worker") if self._loop is None: self._loop = loopingcall.FixedIntervalLoopingCall(self._check_func) self._loop.start(interval=self._interval, diff --git a/tools/download_gerrit_change.py b/tools/download_gerrit_change.py index 31ba1a63ebf..776fbefdf9b 100755 --- a/tools/download_gerrit_change.py +++ b/tools/download_gerrit_change.py @@ -30,7 +30,7 @@ TIMEOUT = 10 def fetch(change, output_patch=None, url=GERRIT_URL, timeout=TIMEOUT): params = {'download': None} r = requests.get( - url='{}/changes/{}/revisions/current/patch'.format(url, change), + url=f'{url}/changes/{change}/revisions/current/patch', params=params, timeout=timeout) r.raise_for_status() diff --git a/tools/files_in_patch.py b/tools/files_in_patch.py index 30bfb1c6f0d..423f095e9a2 100755 --- a/tools/files_in_patch.py +++ b/tools/files_in_patch.py @@ -47,7 +47,7 @@ def parse_input(input_file): def prune_unwanted_names(): global file_names - unwanted_names = set(['/dev/null']) + unwanted_names = {'/dev/null'} for curr_file_name in file_names: # ignore files that end in '.orig' as long as non-.orig exists @@ -70,11 +70,11 @@ if __name__ == '__main__': else: for curr_input_name in sys.argv[1:]: try: - with open(curr_input_name, 'r') as curr_input_file: + with open(curr_input_name) as curr_input_file: parse_input(curr_input_file) - except IOError as e_str: + except OSError as e_str: sys.stderr.write( - "Cannot open {}: {}\n".format(curr_input_name, e_str)) + f"Cannot open {curr_input_name}: {e_str}\n") sys.exit(255) prune_unwanted_names() diff --git a/tools/migrate_names.py b/tools/migrate_names.py index 2a6b581e1ae..abb7316011d 100755 --- a/tools/migrate_names.py +++ b/tools/migrate_names.py @@ -33,7 +33,7 @@ Migration = namedtuple('Migration', 'from_repo to_repo') def read_mapfile(mapfile): dirmaps = [] - with open(mapfile, 'r') as mapfile_fd: + with open(mapfile) as mapfile_fd: for line_buffer in mapfile_fd.readlines(): # ignore empty 
lines and anything after # line_match = re.search("^([^#]+)", line_buffer.strip()) @@ -63,10 +63,10 @@ def parse_input(dirmaps, patch_content, output_fd): new_line_buffer = line_buffer.replace(old, new) if new_line_buffer != line_buffer: filename_replaced = True - output_fd.write("{}\n".format(new_line_buffer)) + output_fd.write(f"{new_line_buffer}\n") break if not filename_replaced: - output_fd.write("{}\n".format(line_buffer)) + output_fd.write(f"{line_buffer}\n") @contextlib.contextmanager @@ -125,7 +125,7 @@ def cli(): if parsed_args.reverse: dirmaps = [Migration(two, one) for one, two in dirmaps] if os.path.isfile(parsed_args.input_patch): - with open(parsed_args.input_patch, 'r') as input_fd: + with open(parsed_args.input_patch) as input_fd: patch_content = ''.join(input_fd.readlines()) else: patch_content = download_gerrit_change.fetch(parsed_args.input_patch)