hacking: Resolve W503 (line break occurred before a binary operator)
Change-Id: I6381365ff882cf23808e8dabfce41143c5e35192
Signed-off-by: Stephen Finucane <sfinucan@redhat.com>

parent 914ee91a5c
commit 231908a7f4
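W503 and W504 are mutually exclusive pycodestyle checks: W503 fires when a line
break comes before a binary operator, W504 when it comes after. Because the two
conflict, a project can enforce only one of them; this change moves every
offending operator to the end of the line so that W503 can be enforced and
dropped from the ignore list in tox.ini. A minimal before/after sketch (the
variable names are invented for illustration):

    # Old style, flagged by W503: the continuation line starts with the
    # operator.
    total = (first_value
             + second_value)

    # New style, accepted once W503 is enforced: the operator ends the line
    # instead.
    total = (first_value +
             second_value)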
@@ -414,8 +414,8 @@ class InstanceMetadata(object):
                 device_metadata['mac'] = device.mac
                 # NOTE(artom) If a device has neither tags, vlan or
                 # vf_trusted, don't expose it
-                if not ('tags' in device or 'vlan' in device_metadata
-                        or 'vf_trusted' in device_metadata):
+                if not ('tags' in device or 'vlan' in device_metadata or
+                        'vf_trusted' in device_metadata):
                     continue
             elif isinstance(device, metadata_obj.DiskMetadata):
                 device_metadata['type'] = 'disk'
@@ -218,7 +218,7 @@ class AggregateController(wsgi.Controller):
         # case it is only ['availability_zone']) without worrying about
         # lazy-loading an unset variable
         for key in keys:
-            if ((aggregate.obj_attr_is_set(key)
-                 or key in aggregate.obj_extra_fields) and
+            if ((aggregate.obj_attr_is_set(key) or
+                 key in aggregate.obj_extra_fields) and
                 (show_uuid or key != 'uuid')):
                 yield key, getattr(aggregate, key)
@@ -1034,8 +1034,8 @@ class ServersController(wsgi.Controller):

         helpers.translate_attributes(helpers.REBUILD, rebuild_dict, kwargs)

-        if (api_version_request.is_supported(req, min_version='2.54')
-                and 'key_name' in rebuild_dict):
+        if (api_version_request.is_supported(req, min_version='2.54') and
+                'key_name' in rebuild_dict):
             kwargs['key_name'] = rebuild_dict.get('key_name')

         # If user_data is not specified, we don't include it in kwargs because
@@ -1047,12 +1047,12 @@ class ServersController(wsgi.Controller):

         # Skip policy check for 'rebuild:trusted_certs' if no trusted
         # certificate IDs were provided.
-        if ((api_version_request.is_supported(req, min_version='2.63'))
+        if ((api_version_request.is_supported(req, min_version='2.63')) and
                 # Note that this is different from server create since with
                 # rebuild a user can unset/reset the trusted certs by
                 # specifying trusted_image_certificates=None, similar to
                 # key_name.
-                and ('trusted_image_certificates' in rebuild_dict)):
+                ('trusted_image_certificates' in rebuild_dict)):
             kwargs['trusted_certs'] = rebuild_dict.get(
                 'trusted_image_certificates')
             context.can(server_policies.SERVERS % 'rebuild:trusted_certs',
@@ -81,10 +81,10 @@ class ShelveController(wsgi.Controller):
         # We could potentially move this check to conductor and avoid the
         # extra API call to neutron when we support move operations with ports
         # having resource requests.
-        if (instance.vm_state == vm_states.SHELVED_OFFLOADED
-                and common.instance_has_port_with_resource_request(
-                    context, instance.uuid, self.network_api)
-                and not common.supports_port_resource_request_during_move(
+        if (instance.vm_state == vm_states.SHELVED_OFFLOADED and
+                common.instance_has_port_with_resource_request(
+                    context, instance.uuid, self.network_api) and
+                not common.supports_port_resource_request_during_move(
                     req)):
             msg = _("The unshelve action on a server with ports having "
                     "resource requests, like a port with a QoS minimum "
@@ -168,8 +168,8 @@ class URLMap(paste.urlmap.URLMap):
         for (domain, app_url), app in self.applications:
             if domain and domain != host and domain != host + ':' + port:
                 continue
-            if (path_info == app_url
-                    or path_info.startswith(app_url + '/')):
+            if (path_info == app_url or
+                    path_info.startswith(app_url + '/')):
                 return app, app_url

         return None, None
@@ -1510,8 +1510,8 @@ class API(base.Base):
         # be used for booting.
         boot_indexes = sorted([bdm.boot_index
                                for bdm in block_device_mappings
-                               if bdm.boot_index is not None
-                               and bdm.boot_index >= 0])
+                               if bdm.boot_index is not None and
+                               bdm.boot_index >= 0])

         # Each device which is capable of being used as boot device should
         # be given a unique boot index, starting from 0 in ascending order, and
@@ -5389,8 +5389,8 @@ class AggregateAPI(base.Base):
             context, 'availability_zone', hosts=_hosts)
         conflicting_azs = [
             agg.availability_zone for agg in host_aggregates
-            if agg.availability_zone != metadata['availability_zone']
-            and agg.id != aggregate.id]
+            if agg.availability_zone != metadata['availability_zone'] and
+            agg.id != aggregate.id]
         if conflicting_azs:
             msg = _("One or more hosts already in availability zone(s) "
                     "%s") % conflicting_azs
@@ -1062,10 +1062,12 @@ class ComputeManager(manager.Manager):
         reboot_type = compute_utils.get_reboot_type(current_task_state,
                                                     current_power_state)

-        pending_soft = (current_task_state == task_states.REBOOT_PENDING and
-                        instance.vm_state in vm_states.ALLOW_SOFT_REBOOT)
-        pending_hard = (current_task_state == task_states.REBOOT_PENDING_HARD
-                        and instance.vm_state in vm_states.ALLOW_HARD_REBOOT)
+        pending_soft = (
+            current_task_state == task_states.REBOOT_PENDING and
+            instance.vm_state in vm_states.ALLOW_SOFT_REBOOT)
+        pending_hard = (
+            current_task_state == task_states.REBOOT_PENDING_HARD and
+            instance.vm_state in vm_states.ALLOW_HARD_REBOOT)
         started_not_running = (current_task_state in
                                [task_states.REBOOT_STARTED,
                                 task_states.REBOOT_STARTED_HARD] and
@@ -2080,9 +2082,9 @@ class ComputeManager(manager.Manager):
             RequestGroup.
         """

-        if (request_spec
-                and 'requested_resources' in request_spec
-                and request_spec.requested_resources is not None):
+        if (request_spec and
+                'requested_resources' in request_spec and
+                request_spec.requested_resources is not None):
             return {
                 group.requester_id: group.provider_uuids
                 for group in request_spec.requested_resources
@@ -2096,8 +2098,8 @@ class ComputeManager(manager.Manager):
             return

         def needs_update(pci_request, mapping):
-            return (pci_request.requester_id
-                    and pci_request.requester_id in mapping)
+            return (pci_request.requester_id and
+                    pci_request.requester_id in mapping)

         modified = False
         for pci_request in instance.pci_requests.requests:
@@ -71,8 +71,8 @@ class Monitor(base.CPUMonitorBase):
         # We compute the utilization percentages for each specific CPU time
         # after calculating the delta between the current reading and the
         # previous reading.
-        stats["total"] = (stats["user"] + stats["kernel"]
-                          + stats["idle"] + stats["iowait"])
+        stats["total"] = (stats["user"] + stats["kernel"] +
+                          stats["idle"] + stats["iowait"])
         cputime = float(stats["total"] - self._cpu_stats.get("total", 0))

         # NOTE(jwcroppe): Convert all the `perc` values to their integer forms
@@ -92,9 +92,9 @@ class Monitor(base.CPUMonitorBase):

         # Compute the current system-wide CPU utilization as a percentage.
         used = stats["user"] + stats["kernel"] + stats["iowait"]
-        prev_used = (self._cpu_stats.get("user", 0)
-                     + self._cpu_stats.get("kernel", 0)
-                     + self._cpu_stats.get("iowait", 0))
+        prev_used = (self._cpu_stats.get("user", 0) +
+                     self._cpu_stats.get("kernel", 0) +
+                     self._cpu_stats.get("iowait", 0))
         perc = (used - prev_used) / cputime
         self._data["cpu.percent"] = int(perc * 100)

@@ -64,9 +64,8 @@ def _instance_in_resize_state(instance):
     if vm == vm_states.RESIZED:
         return True

-    if (vm in [vm_states.ACTIVE, vm_states.STOPPED]
-            and task in (
-            task_states.resizing_states + task_states.rebuild_states)):
+    if vm in [vm_states.ACTIVE, vm_states.STOPPED] and task in (
+            task_states.resizing_states + task_states.rebuild_states):
         return True

     return False
@@ -1429,9 +1429,9 @@ nova-network is deprecated, as are any related configuration options.
 ]


-ALL_DEFAULT_OPTS = (linux_net_opts + network_opts + ldap_dns_opts
-                    + rpcapi_opts + driver_opts + floating_ip_opts
-                    + ipv6_opts + quota_opts + service_opts)
+ALL_DEFAULT_OPTS = (linux_net_opts + network_opts + ldap_dns_opts +
+                    rpcapi_opts + driver_opts + floating_ip_opts +
+                    ipv6_opts + quota_opts + service_opts)


 def register_opts(conf):
@@ -907,8 +907,8 @@ def _extract_attributes_v2(image, include_locations=False):
               'disk_format': None, 'container_format': None, 'name': None,
               'checksum': None}
     for name, value in image.items():
-        if (name in omit_attrs
-                or name in include_locations_attrs and not include_locations):
+        if (name in omit_attrs or
+                name in include_locations_attrs and not include_locations):
            continue
        elif name == 'visibility':
            output['is_public'] = value == 'public'
@@ -584,8 +584,8 @@ def metadata_forward():


 def _iptables_dest(ip):
-    if ((netaddr.IPAddress(ip).version == 4 and ip == '127.0.0.1')
-            or ip == '::1'):
+    if ((netaddr.IPAddress(ip).version == 4 and ip == '127.0.0.1') or
+            ip == '::1'):
         return '-m addrtype --dst-type LOCAL'
     else:
         return '-d %s' % ip
@@ -859,9 +859,9 @@ class API(base_api.NetworkAPI):
         # or if it is indirectly called through allocate_port_for_instance()
         # with None params=(network_id=None, requested_ip=None, port_id=None,
         # pci_request_id=None):
-        if (not requested_networks
-                or requested_networks.is_single_unspecified
-                or requested_networks.auto_allocate):
+        if (not requested_networks or
+                requested_networks.is_single_unspecified or
+                requested_networks.auto_allocate):
             # If no networks were requested and none are available, consider
             # it a bad request.
             if not nets:
@@ -1181,8 +1181,8 @@ class API(base_api.NetworkAPI):
     def _refresh_neutron_extensions_cache(self, context, neutron=None):
         """Refresh the neutron extensions cache when necessary."""
         if (not self.last_neutron_extension_sync or
-                ((time.time() - self.last_neutron_extension_sync)
-                 >= CONF.neutron.extension_sync_interval)):
+                ((time.time() - self.last_neutron_extension_sync) >=
+                 CONF.neutron.extension_sync_interval)):
             if neutron is None:
                 neutron = get_client(context)
             extensions_list = neutron.list_extensions()['extensions']
@@ -1691,8 +1691,8 @@ class API(base_api.NetworkAPI):

     def _delete_nic_metadata(self, instance, vif):
         for device in instance.device_metadata.devices:
-            if (isinstance(device, objects.NetworkInterfaceMetadata)
-                    and device.mac == vif.address):
+            if (isinstance(device, objects.NetworkInterfaceMetadata) and
+                    device.mac == vif.address):
                 instance.device_metadata.devices.remove(device)
                 instance.save()
                 break
@@ -2858,8 +2858,8 @@ class API(base_api.NetworkAPI):
        instance network info cache.
        """
         vif_active = False
-        if (current_neutron_port['admin_state_up'] is False
-                or current_neutron_port['status'] == 'ACTIVE'):
+        if (current_neutron_port['admin_state_up'] is False or
+                current_neutron_port['status'] == 'ACTIVE'):
             vif_active = True

         network_IPs = self._nw_info_get_ips(client,
@@ -3241,10 +3241,9 @@ class API(base_api.NetworkAPI):
             # that this function is called without a migration object, such
             # as in an unshelve operation.
             vnic_type = p.get('binding:vnic_type')
-            if (vnic_type in network_model.VNIC_TYPES_SRIOV
-                    and migration is not None
-                    and migration['migration_type'] !=
-                    constants.LIVE_MIGRATION):
+            if (vnic_type in network_model.VNIC_TYPES_SRIOV and
+                    migration is not None and
+                    migration['migration_type'] != constants.LIVE_MIGRATION):
                 # Note(adrianc): for live migration binding profile was already
                 # updated in conductor when calling bind_ports_to_host()
                 if not pci_mapping:
@@ -174,9 +174,9 @@ class SecurityGroupAPI(security_group_base.SecurityGroupBase):
         # * names and ids don't exist.
         # * it is admin context and all_tenants exist in search_opts.
         # * project is not specified.
-        list_all_tenants = (context.is_admin
-                            and 'all_tenants' in search_opts
-                            and not any([names, ids]))
+        list_all_tenants = (context.is_admin and
+                            'all_tenants' in search_opts and
+                            not any([names, ids]))
         # NOTE(jeffrey4l): The neutron doesn't have `all-tenants` concept.
         # All the security group will be returned if the project/tenant
         # id is not passed.
@@ -67,8 +67,8 @@ class SecurityGroupBase(object):
                 from_port is None and to_port is None):
             from_port = -1
             to_port = -1
-        elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None
-              and to_port is None):
+        elif (ip_proto_upper in ['TCP', 'UDP'] and from_port is None and
+              to_port is None):
             from_port = 1
             to_port = 65535

@@ -118,8 +118,8 @@ def send_update_with_states(context, instance, old_vm_state, new_vm_state,
     if old_vm_state != new_vm_state:
         # yes, the vm state is changing:
         fire_update = True
-    elif (CONF.notifications.notify_on_state_change == "vm_and_task_state"
-          and old_task_state != new_task_state):
+    elif (CONF.notifications.notify_on_state_change ==
+          "vm_and_task_state" and old_task_state != new_task_state):
         # yes, the task state is changing:
         fire_update = True

@@ -434,8 +434,8 @@ class BuildRequestList(base.ObjectListBase, base.NovaObject):

             filtered_build_reqs.append(build_req)

-        if (((len(filtered_build_reqs) < 2) or (not sort_keys))
-                and not marker):
+        if (((len(filtered_build_reqs) < 2) or (not sort_keys)) and
+                not marker):
             # No need to sort
             return cls(context, objects=filtered_build_reqs)

@@ -144,9 +144,9 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):

     @staticmethod
     def _host_from_db_object(compute, db_compute):
-        if (('host' not in db_compute or db_compute['host'] is None)
-                and 'service_id' in db_compute
-                and db_compute['service_id'] is not None):
+        if (('host' not in db_compute or db_compute['host'] is None) and
+                'service_id' in db_compute and
+                db_compute['service_id'] is not None):
             # FIXME(sbauza) : Unconverted compute record, provide compatibility
             # This has to stay until we can be sure that any/all compute nodes
             # in the database have been converted to use the host field
@@ -580,8 +580,8 @@ class ImageMetaProps(base.NovaObject):

     def _set_attr_from_trait_names(self, image_props):
         for trait in [six.text_type(k[6:]) for k, v in image_props.items()
-                      if six.text_type(k).startswith("trait:")
-                      and six.text_type(v) == six.text_type('required')]:
+                      if six.text_type(k).startswith("trait:") and
+                      six.text_type(v) == six.text_type('required')]:
             if 'traits_required' not in self:
                 self.traits_required = []
             self.traits_required.append(trait)
@@ -102,8 +102,8 @@ class InstanceMapping(base.NovaTimestampObject, base.NovaObject):
         db_mapping = (context.session.query(api_models.InstanceMapping)
                       .options(joinedload('cell_mapping'))
                       .filter(
-                          api_models.InstanceMapping.instance_uuid
-                          == instance_uuid)).first()
+                          api_models.InstanceMapping.instance_uuid ==
+                          instance_uuid)).first()
         if not db_mapping:
             raise exception.InstanceMappingNotFound(uuid=instance_uuid)

@@ -233,6 +233,6 @@ class InstanceNUMATopology(base.NovaObject,
     @property
     def emulator_threads_isolated(self):
         """Determines whether emulator threads should be isolated"""
-        return (self.obj_attr_is_set('emulator_threads_policy')
-                and (self.emulator_threads_policy
-                     == obj_fields.CPUEmulatorThreadsPolicy.ISOLATE))
+        return (self.obj_attr_is_set('emulator_threads_policy') and
+                (self.emulator_threads_policy ==
+                 obj_fields.CPUEmulatorThreadsPolicy.ISOLATE))
@@ -332,12 +332,12 @@ class RequestSpec(base.NovaObject):
         :param hint_name: name of the hint
         :param default: the default value if the hint is not there
         """
-        if (not self.obj_attr_is_set('scheduler_hints')
-                or self.scheduler_hints is None):
+        if (not self.obj_attr_is_set('scheduler_hints') or
+                self.scheduler_hints is None):
             return default
         hint_val = self.scheduler_hints.get(hint_name, default)
-        return (hint_val[0] if isinstance(hint_val, list)
-                and len(hint_val) == 1 else hint_val)
+        return (hint_val[0] if isinstance(hint_val, list) and
+                len(hint_val) == 1 else hint_val)

     def _to_legacy_image(self):
         return base.obj_to_primitive(self.image) if (
@@ -810,9 +810,9 @@ class RequestSpec(base.NovaObject):

         for group in self.requested_resources:
             # See the limitations in the func doc above
-            if (not group.use_same_provider
-                    or group.aggregates
-                    or group.forbidden_traits):
+            if (not group.use_same_provider or
+                    group.aggregates or
+                    group.forbidden_traits):
                 raise NotImplementedError()

         # Iterate through every possible group - RP mappings and try to find a
@@ -646,8 +646,8 @@ class SchedulerReportClient(object):
         # - "Cascading generations" - i.e. a change to a leaf node percolates
         # generation bump up the tree so that we bounce 409 the next time we
         # try to update anything and have to refresh.
-        if (self._provider_tree.exists(uuid)
-                and not self._associations_stale(uuid)):
+        if (self._provider_tree.exists(uuid) and
+                not self._associations_stale(uuid)):
             uuids_to_refresh = [
                 u for u in self._provider_tree.get_provider_uuids(uuid)
                 if self._associations_stale(u)]
@@ -1117,8 +1117,8 @@ class SchedulerReportClient(object):
         # Check whether aggregates need updating. We can only do this if we
         # have a cache entry with a matching generation.
         try:
-            if (self._provider_tree.data(rp_uuid).generation == generation
-                    and not self._provider_tree.have_aggregates_changed(
+            if (self._provider_tree.data(rp_uuid).generation == generation and
+                    not self._provider_tree.have_aggregates_changed(
                         rp_uuid, aggregates)):
                 return
         except ValueError:
@@ -44,8 +44,8 @@ class AggregateInstanceExtraSpecsFilter(filters.BaseHostFilter):
         instance_type = spec_obj.flavor
         # If 'extra_specs' is not present or extra_specs are empty then we
         # need not proceed further
-        if (not instance_type.obj_attr_is_set('extra_specs')
-                or not instance_type.extra_specs):
+        if (not instance_type.obj_attr_is_set('extra_specs') or
+                not instance_type.extra_specs):
             return True

         metadata = utils.aggregate_metadata_get_by_host(host_state)
@@ -192,8 +192,8 @@ class HostState(object):
                       'updated yet.', compute.uuid)
             return

-        if (self.updated and compute.updated_at
-                and self.updated > compute.updated_at):
+        if (self.updated and compute.updated_at and
+                self.updated > compute.updated_at):
             return
         all_ram_mb = compute.memory_mb

@@ -393,8 +393,8 @@ class HostManager(object):
         # Refreshing the mapping dict to remove all hosts that are no longer
         # part of the aggregate
         for host in self.host_aggregates_map:
-            if (aggregate.id in self.host_aggregates_map[host]
-                    and host not in aggregate.hosts):
+            if (aggregate.id in self.host_aggregates_map[host] and
+                    host not in aggregate.hosts):
                 self.host_aggregates_map[host].remove(aggregate.id)

     def delete_aggregate(self, aggregate):
@@ -482,8 +482,8 @@ def resources_from_request_spec(ctxt, spec_obj, host_manager):
         res_req.get_request_group(None).resources[rclass] = amount

     requested_resources = (spec_obj.requested_resources
-                           if 'requested_resources' in spec_obj
-                           and spec_obj.requested_resources
+                           if 'requested_resources' in spec_obj and
+                           spec_obj.requested_resources
                            else [])
     for group in requested_resources:
         res_req.add_request_group(group)
@@ -934,8 +934,8 @@ def _get_group_details(context, instance_uuid, user_group_hosts=None):
         msg = _("ServerGroupSoftAffinityWeigher not configured")
         LOG.error(msg)
         raise exception.UnsupportedPolicyException(reason=msg)
-    if (not _SUPPORTS_SOFT_ANTI_AFFINITY
-            and 'soft-anti-affinity' == group.policy):
+    if (not _SUPPORTS_SOFT_ANTI_AFFINITY and
+            'soft-anti-affinity' == group.policy):
         msg = _("ServerGroupSoftAntiAffinityWeigher not configured")
         LOG.error(msg)
         raise exception.UnsupportedPolicyException(reason=msg)
@@ -40,6 +40,7 @@ class CPUWeigher(weights.BaseHostWeigher):

     def _weigh_object(self, host_state, weight_properties):
         """Higher weights win. We want spreading to be the default."""
-        vcpus_free = (host_state.vcpus_total * host_state.cpu_allocation_ratio
-                      - host_state.vcpus_used)
+        vcpus_free = (
+            host_state.vcpus_total * host_state.cpu_allocation_ratio -
+            host_state.vcpus_used)
         return vcpus_free
@@ -50,8 +50,8 @@ class NUMAServersTestBase(base.ServersTestBase):
     def _setup_scheduler_service(self):
         # Enable the 'NUMATopologyFilter'
         self.flags(driver='filter_scheduler', group='scheduler')
-        self.flags(enabled_filters=CONF.filter_scheduler.enabled_filters
-                                   + ['NUMATopologyFilter'],
+        self.flags(enabled_filters=CONF.filter_scheduler.enabled_filters +
+                                   ['NUMATopologyFilter'],
                    group='filter_scheduler')
         return self.start_service('scheduler')

@@ -47,8 +47,8 @@ class ServerGroupTestBase(test.TestCase,
     api_major_version = 'v2.1'
     microversion = None

-    _enabled_filters = (CONF.filter_scheduler.enabled_filters
-                        + ['ServerGroupAntiAffinityFilter',
+    _enabled_filters = (CONF.filter_scheduler.enabled_filters +
+                        ['ServerGroupAntiAffinityFilter',
                          'ServerGroupAffinityFilter'])

     # Override servicegroup parameters to make the tests run faster
@@ -5442,8 +5442,8 @@ class PortResourceRequestBasedSchedulingTestBase(
         # starts the scheduler
         if 'PciPassthroughFilter' not in CONF.filter_scheduler.enabled_filters:
             self.flags(
-                enabled_filters=CONF.filter_scheduler.enabled_filters
-                                + ['PciPassthroughFilter'],
+                enabled_filters=CONF.filter_scheduler.enabled_filters +
+                                ['PciPassthroughFilter'],
                 group='filter_scheduler')

         self.useFixture(
@@ -3043,18 +3043,18 @@ class ComputeTestCase(BaseTestCase,
                      test_unrescue=False, fail_reboot=False,
                      fail_running=False):
         reboot_type = soft and 'SOFT' or 'HARD'
-        task_pending = (soft and task_states.REBOOT_PENDING
-                        or task_states.REBOOT_PENDING_HARD)
-        task_started = (soft and task_states.REBOOT_STARTED
-                        or task_states.REBOOT_STARTED_HARD)
-        expected_task = (soft and task_states.REBOOTING
-                         or task_states.REBOOTING_HARD)
+        task_pending = (soft and task_states.REBOOT_PENDING or
+                        task_states.REBOOT_PENDING_HARD)
+        task_started = (soft and task_states.REBOOT_STARTED or
+                        task_states.REBOOT_STARTED_HARD)
+        expected_task = (soft and task_states.REBOOTING or
+                         task_states.REBOOTING_HARD)
         expected_tasks = (soft and (task_states.REBOOTING,
                                     task_states.REBOOT_PENDING,
-                                    task_states.REBOOT_STARTED)
-                          or (task_states.REBOOTING_HARD,
-                              task_states.REBOOT_PENDING_HARD,
-                              task_states.REBOOT_STARTED_HARD))
+                                    task_states.REBOOT_STARTED) or
+                          (task_states.REBOOTING_HARD,
+                           task_states.REBOOT_PENDING_HARD,
+                           task_states.REBOOT_STARTED_HARD))

         # This is a true unit test, so we don't need the network stubs.
         fake_network.unset_stub_network_methods(self)
@@ -251,8 +251,8 @@ def fake_virtual_interface_get_by_instance(context, instance_id):
 def fake_virtual_interface_get_by_instance_and_network(context,
                                                        instance_id,
                                                        network_id):
-    vif = [v for v in virtual_interfaces if v['instance_id'] == instance_id
-           and v['network_id'] == network_id]
+    vif = [v for v in virtual_interfaces if v['instance_id'] == instance_id and
+           v['network_id'] == network_id]
     if not vif:
         return None
     return FakeModel(vif[0])
@@ -132,8 +132,9 @@ def fake_instance_obj(context, obj_instance_class=None, **updates):
         inst.vcpus = flavor.vcpus
         if 'memory_mb' in flavor and 'memory_mb' not in updates:
             inst.memory_mb = flavor.memory_mb
-        if ('instance_type_id' not in inst or inst.instance_type_id is None
-                and 'id' in flavor):
+        if ('instance_type_id' not in inst or
+                inst.instance_type_id is None and
+                'id' in flavor):
             inst.instance_type_id = flavor.id
     inst.old_flavor = None
     inst.new_flavor = None
@@ -283,9 +283,9 @@ vifs = [{'id': 0,
 def get_associated(context, network_id, host=None, address=None):
     result = []
     for datum in fixed_ips:
-        if (datum['network_id'] == network_id
-                and datum['instance_uuid'] is not None
-                and datum['virtual_interface_id'] is not None):
+        if (datum['network_id'] == network_id and
+                datum['instance_uuid'] is not None and
+                datum['virtual_interface_id'] is not None):
             instance = instances[datum['instance_uuid']]
             if host and host != instance['host']:
                 continue
@@ -555,8 +555,8 @@ class TestNeutronv2Base(test.TestCase):
         pre_create_port = (
             kwargs.get('_break') == 'post_list_networks' or
             ((requested_networks is None or
-              requested_networks.as_tuples() == [(None, None, None)])
-             and len(nets) > 1) or
+              requested_networks.as_tuples() == [(None, None, None)]) and
+             len(nets) > 1) or
             kwargs.get('_break') == 'post_list_extensions')

         if pre_create_port:
@@ -163,8 +163,8 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
         if force_nodes is None:
             force_nodes = []
         if with_retry:
-            if ((len(force_hosts) == 1 and len(force_nodes) <= 1)
-                    or (len(force_nodes) == 1 and len(force_hosts) <= 1)):
+            if ((len(force_hosts) == 1 and len(force_nodes) <= 1) or
+                    (len(force_nodes) == 1 and len(force_hosts) <= 1)):
                 filter_properties = dict(force_hosts=force_hosts,
                                          force_nodes=force_nodes)
             elif len(force_hosts) > 1 or len(force_nodes) > 1:
@@ -208,8 +208,8 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
             self.assertEqual(expected_limits,
                              filter_properties.get('limits'))

-        if (with_retry and enable_retry_force_hosts
-                and enable_retry_force_nodes):
+        if (with_retry and enable_retry_force_hosts and
+                enable_retry_force_nodes):
             self.assertEqual([['fake-host', 'fake-node'],
                               ['fake-host', 'fake-node']],
                              filter_properties['retry']['hosts'])
@@ -675,10 +675,10 @@ class GuestTestCase(test.NoDBTestCase):
             quiesce=True)
         self.domain.snapshotCreateXML(
             '<disk/>', flags=(
-                fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT
-                | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY
-                | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA
-                | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE))
+                fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT |
+                fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
+                fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
+                fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE))
         conf.to_xml.assert_called_once_with()

     def test_pause(self):
@@ -663,8 +663,8 @@ class ClusterComputeResource(ManagedObject):
             summary.numCpuCores += host_summary.hardware.numCpuCores
             summary.numCpuThreads += host_summary.hardware.numCpuThreads
             summary.totalMemory += host_summary.hardware.memorySize
-            free_memory = (host_summary.hardware.memorySize / units.Mi
-                           - host_summary.quickStats.overallMemoryUsage)
+            free_memory = (host_summary.hardware.memorySize / units.Mi -
+                           host_summary.quickStats.overallMemoryUsage)
             summary.effectiveMemory += free_memory if connected else 0
             summary.numEffectiveHosts += 1 if connected else 0
         self.set("summary", summary)
@@ -279,9 +279,8 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
                        'device_type': None}

     def _transform(self):
-        if (not self._bdm_obj.source_type == self._valid_source
-                or not self._bdm_obj.destination_type ==
-                self._valid_destination):
+        if (not self._bdm_obj.source_type == self._valid_source or
+                not self._bdm_obj.destination_type == self._valid_destination):
             raise _InvalidType

         self.update(
@@ -925,9 +924,9 @@ def is_implemented(bdm):


 def is_block_device_mapping(bdm):
-    return (bdm.source_type in ('image', 'volume', 'snapshot', 'blank')
-            and bdm.destination_type == 'volume'
-            and is_implemented(bdm))
+    return (bdm.source_type in ('image', 'volume', 'snapshot', 'blank') and
+            bdm.destination_type == 'volume' and
+            is_implemented(bdm))


 def get_volume_id(connection_info):
@@ -482,8 +482,8 @@ def _inject_files_into_fs(files, fs):
     for (path, contents) in files:
         # NOTE(wangpan): Ensure the parent dir of injecting file exists
         parent_dir = os.path.dirname(path)
-        if (len(parent_dir) > 0 and parent_dir != "/"
-                and not fs.has_file(parent_dir)):
+        if (len(parent_dir) > 0 and parent_dir != "/" and
+                not fs.has_file(parent_dir)):
             fs.make_path(parent_dir)
             fs.set_ownership(parent_dir, "root", "root")
             fs.set_permissions(parent_dir, 0o744)
@@ -110,8 +110,8 @@ class VFSGuestFS(vfs.VFS):
                        "event": guestfs.event_to_string(ev),
                        "eh": eh, "buf": buf, "array": array})

-        events = (guestfs.EVENT_APPLIANCE | guestfs.EVENT_LIBRARY
-                  | guestfs.EVENT_WARNING | guestfs.EVENT_TRACE)
+        events = (guestfs.EVENT_APPLIANCE | guestfs.EVENT_LIBRARY |
+                  guestfs.EVENT_WARNING | guestfs.EVENT_TRACE)

         self.handle.set_trace(True)  # just traces libguestfs API calls
         self.handle.set_verbose(True)
@@ -586,8 +586,7 @@ def _get_desirable_cpu_topologies(flavor, image_meta, allow_threads=True,
     if numa_topology:
         min_requested_threads = None
         cell_topologies = [cell.cpu_topology for cell in numa_topology.cells
-                           if ('cpu_topology' in cell
-                               and cell.cpu_topology)]
+                           if ('cpu_topology' in cell and cell.cpu_topology)]
         if cell_topologies:
             min_requested_threads = min(
                 topo.threads for topo in cell_topologies)
@@ -1759,8 +1758,8 @@ def numa_fit_instance_to_host(
             host_cell_perm, instance_topology.cells):
         try:
             cpuset_reserved = 0
-            if (instance_topology.emulator_threads_isolated
-                    and len(chosen_instance_cells) == 0):
+            if (instance_topology.emulator_threads_isolated and
+                    len(chosen_instance_cells) == 0):
                 # For the case of isolate emulator threads, to
                 # make predictable where that CPU overhead is
                 # located we always configure it to be on host
@@ -1897,8 +1896,8 @@ def numa_usage_from_instances(host, instances, free=False):
                     cpu_usage_diff *= max(map(len, hostcell.siblings))
                 cpu_usage += sign * cpu_usage_diff

-                if (cellid == 0
-                        and instance.emulator_threads_isolated):
+                if (cellid == 0 and
+                        instance.emulator_threads_isolated):
                     # The emulator threads policy when defined
                     # with 'isolate' makes the instance to consume
                     # an additional pCPU as overhead. That pCPU is
@@ -1164,9 +1164,9 @@ class IronicDriver(virt_driver.ComputeDriver):

         # validate we are ready to do the deploy
         validate_chk = self.ironicclient.call("node.validate", node_uuid)
-        if (not validate_chk.deploy.get('result')
-                or not validate_chk.power.get('result')
-                or not validate_chk.storage.get('result')):
+        if (not validate_chk.deploy.get('result') or
+                not validate_chk.power.get('result') or
+                not validate_chk.storage.get('result')):
             # something is wrong. undo what we have done
             self._cleanup_deploy(node, instance, network_info)
             raise exception.ValidationError(_(
@@ -2031,10 +2031,10 @@ class LibvirtDriver(driver.ComputeDriver):
         # redundant because LVM supports only cold snapshots.
         # It is necessary in case this situation changes in the
         # future.
-        if (self._host.has_min_version(hv_type=host.HV_DRIVER_QEMU)
-                and source_type != 'lvm'
-                and not CONF.ephemeral_storage_encryption.enabled
-                and not CONF.workarounds.disable_libvirt_livesnapshot
+        if (self._host.has_min_version(hv_type=host.HV_DRIVER_QEMU) and
+                source_type != 'lvm' and
+                not CONF.ephemeral_storage_encryption.enabled and
+                not CONF.workarounds.disable_libvirt_livesnapshot and
                 # NOTE(rmk): We cannot perform live snapshots when a
                 # managedSave file is present, so we will use the cold/legacy
                 # method for instances which are shutdown or paused.
@@ -2043,7 +2043,7 @@ class LibvirtDriver(driver.ComputeDriver):
                 # remove the restriction on PAUSED once we require
                 # libvirt>=3.6.0 and qemu>=2.10 since that works with the
                 # Pike Ubuntu Cloud Archive testing in Queens.
-                and state not in (power_state.SHUTDOWN, power_state.PAUSED)):
+                state not in (power_state.SHUTDOWN, power_state.PAUSED)):
             live_snapshot = True
             # Abort is an idempotent operation, so make sure any block
             # jobs which may have failed are ended. This operation also
@@ -5273,12 +5273,12 @@ class LibvirtDriver(driver.ComputeDriver):

         if not CONF.libvirt.num_pcie_ports:
             return False
-        if (caps.host.cpu.arch == fields.Architecture.AARCH64
-                and guest.os_mach_type.startswith('virt')):
+        if (caps.host.cpu.arch == fields.Architecture.AARCH64 and
+                guest.os_mach_type.startswith('virt')):
             return True
-        if (caps.host.cpu.arch == fields.Architecture.X86_64
-                and guest.os_mach_type is not None
-                and 'q35' in guest.os_mach_type):
+        if (caps.host.cpu.arch == fields.Architecture.X86_64 and
+                guest.os_mach_type is not None and
+                'q35' in guest.os_mach_type):
             return True
         return False

@@ -5432,8 +5432,8 @@ class LibvirtDriver(driver.ComputeDriver):

     @staticmethod
     def _guest_add_spice_channel(guest):
-        if (CONF.spice.enabled and CONF.spice.agent_enabled
-                and guest.virt_type not in ('lxc', 'uml', 'xen')):
+        if (CONF.spice.enabled and CONF.spice.agent_enabled and
+                guest.virt_type not in ('lxc', 'uml', 'xen')):
             channel = vconfig.LibvirtConfigGuestChannel()
             channel.type = 'spicevmc'
             channel.target_name = "com.redhat.spice.0"
@@ -5457,8 +5457,8 @@ class LibvirtDriver(driver.ComputeDriver):
     def _guest_add_watchdog_action(guest, flavor, image_meta):
         # image meta takes precedence over flavor extra specs; disable the
         # watchdog action by default
-        watchdog_action = (flavor.extra_specs.get('hw:watchdog_action')
-                           or 'disabled')
+        watchdog_action = (flavor.extra_specs.get('hw:watchdog_action') or
+                           'disabled')
         watchdog_action = image_meta.properties.get('hw_watchdog_action',
                                                     watchdog_action)
         # NB(sross): currently only actually supported by KVM/QEmu
@@ -8258,8 +8258,8 @@ class LibvirtDriver(driver.ComputeDriver):
         is_shared_instance_path = True
         if migrate_data:
             is_shared_instance_path = migrate_data.is_shared_instance_path
-            if (migrate_data.obj_attr_is_set("serial_listen_ports")
-                    and migrate_data.serial_listen_ports):
+            if (migrate_data.obj_attr_is_set("serial_listen_ports") and
+                    migrate_data.serial_listen_ports):
                 # Releases serial ports reserved.
                 for port in migrate_data.serial_listen_ports:
                     serial_console.release_port(
@@ -576,12 +576,12 @@ class Guest(object):
         :param reuse_ext: Reuse any existing external files
         :param quiesce: Use QGA to quiece all mounted file systems
         """
-        flags = no_metadata and (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA
-                                 or 0)
-        flags |= disk_only and (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY
-                                or 0)
-        flags |= reuse_ext and (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT
-                                or 0)
+        flags = no_metadata and (
+            libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA or 0)
+        flags |= disk_only and (
+            libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY or 0)
+        flags |= reuse_ext and (
+            libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT or 0)
         flags |= quiesce and libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE or 0

         device_xml = conf.to_xml()
@@ -647,8 +647,9 @@ class Host(object):
             self._caps.parse_str(xmlstr)
             # NOTE(mriedem): Don't attempt to get baseline CPU features
             # if libvirt can't determine the host cpu model.
-            if (hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES')
-                    and self._caps.host.cpu.model is not None):
+            if (hasattr(libvirt,
+                        'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES') and
+                    self._caps.host.cpu.model is not None):
                 try:
                     xml_str = self._caps.host.cpu.to_xml()
                     if six.PY3 and isinstance(xml_str, six.binary_type):
@@ -40,8 +40,8 @@ libvirt = None
 def graphics_listen_addrs(migrate_data):
     """Returns listen addresses of vnc/spice from a LibvirtLiveMigrateData"""
     listen_addrs = None
-    if (migrate_data.obj_attr_is_set('graphics_listen_addr_vnc')
-            or migrate_data.obj_attr_is_set('graphics_listen_addr_spice')):
+    if (migrate_data.obj_attr_is_set('graphics_listen_addr_vnc') or
+            migrate_data.obj_attr_is_set('graphics_listen_addr_spice')):
         listen_addrs = {'vnc': None, 'spice': None}
         if migrate_data.obj_attr_is_set('graphics_listen_addr_vnc'):
             listen_addrs['vnc'] = str(migrate_data.graphics_listen_addr_vnc)
@@ -173,8 +173,9 @@ class XenAPIDriver(driver.ComputeDriver):
         # Some padding is done to each value to fit all available VM data
         memory_mb = instance_info['memory_mb']
         vcpus = instance_info.get('vcpus', 1)
-        overhead = ((memory_mb * OVERHEAD_PER_MB) + (vcpus * OVERHEAD_PER_VCPU)
-                    + OVERHEAD_BASE)
+        overhead = ((memory_mb * OVERHEAD_PER_MB) +
+                    (vcpus * OVERHEAD_PER_VCPU) +
+                    OVERHEAD_BASE)
         overhead = math.ceil(overhead)
         return {'memory_mb': overhead}

tox.ini
@@ -237,10 +237,10 @@ commands = bandit -r nova -x tests -n 5 -ll
 #
 # W504 skipped since you must choose either W503 or W504 (they conflict)
 #
-# W503, W605, E731, and E741 temporarily skipped because of the number of
+# W605, E731, and E741 temporarily skipped because of the number of
 # these that have to be fixed
 enable-extensions = H106,H203,H904
-ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,H405,W504,W605,W503,E731,E741
+ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,H405,W504,W605,E731,E741
 exclude = .venv,.git,.tox,dist,*lib/python*,*egg,build,tools/xenserver*,releasenotes
 # To get a list of functions that are more complex than 25, set max-complexity
 # to 25 and run 'tox -epep8'.
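With W503 dropped from the ignore list above, the pep8 job will now flag any
reintroduced break-before-operator style. As a quick local sanity check
(assuming flake8 is installed; this command is illustrative, not part of the
change):

    # Report only W503 violations across the tree; no output means clean.
    flake8 --select=W503 nova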