Enable python34 tests for nova/tests/unit/scheduler/test*.py
nova/pci/stats.py: need to specify a key parameter which is a function that returns a sorting key. Since the original intention with pool_cmp was to sort by the number of items in each pool, we just use the key parameter with a lambda. nova/scheduler/filters/trusted_filter.py nova/scheduler/host_manager.py needed list() so we could delete stuff inside the loop in py3. nova/tests/unit/scheduler/test_filter_scheduler.py needed the import of test as some tests were failing in py3; added noqa as test is not explicitly referenced within the module. nova/tests/unit/scheduler/test_scheduler_options.py needs BytesIO for py3; StringIO will fail in many tests. nova/tests/unit/scheduler/test_scheduler_utils.py: message is not available in py3, so we need to adjust; use test.nested already added in a previous commit. nova/test.py: if we have a key which is a list, treat it just like a set. nova/utils.py: use reduce from six and explicitly typecast version_int as an integer. Change-Id: Ic862873f38767f12b990a030b5812ae50a44c4d0
This commit is contained in:
parent
8a73c0430d
commit
5bb1933cc4
@ -60,7 +60,7 @@ class PciDeviceStats(object):
|
||||
# NOTE(sbauza): Stats are a PCIDevicePoolList object
|
||||
self.pools = [pci_pool.to_dict()
|
||||
for pci_pool in stats] if stats else []
|
||||
self.pools.sort(self.pool_cmp)
|
||||
self.pools.sort(key=lambda item: len(item))
|
||||
|
||||
def _equal_properties(self, dev, entry, matching_keys):
|
||||
return all(dev.get(prop) == entry.get(prop)
|
||||
@ -102,7 +102,7 @@ class PciDeviceStats(object):
|
||||
dev_pool['count'] = 0
|
||||
dev_pool['devices'] = []
|
||||
self.pools.append(dev_pool)
|
||||
self.pools.sort(self.pool_cmp)
|
||||
self.pools.sort(key=lambda item: len(item))
|
||||
pool = dev_pool
|
||||
pool['count'] += 1
|
||||
pool['devices'].append(dev)
|
||||
@ -235,10 +235,6 @@ class PciDeviceStats(object):
|
||||
for r in requests]):
|
||||
raise exception.PciDeviceRequestFailed(requests=requests)
|
||||
|
||||
@staticmethod
|
||||
def pool_cmp(dev1, dev2):
|
||||
return len(dev1) - len(dev2)
|
||||
|
||||
def __iter__(self):
|
||||
# 'devices' shouldn't be part of stats
|
||||
pools = []
|
||||
|
@ -223,7 +223,8 @@ class ComputeAttestationCache(object):
|
||||
|
||||
def _update_cache(self):
|
||||
self._invalidate_caches()
|
||||
states = self.attestservice.do_attestation(self.compute_nodes.keys())
|
||||
states = self.attestservice.do_attestation(
|
||||
list(self.compute_nodes.keys()))
|
||||
if states is None:
|
||||
return
|
||||
for state in states:
|
||||
|
@ -452,7 +452,7 @@ class HostManager(object):
|
||||
def _strip_ignore_hosts(host_map, hosts_to_ignore):
|
||||
ignored_hosts = []
|
||||
for host in hosts_to_ignore:
|
||||
for (hostname, nodename) in host_map.keys():
|
||||
for (hostname, nodename) in list(host_map.keys()):
|
||||
if host == hostname:
|
||||
del host_map[(hostname, nodename)]
|
||||
ignored_hosts.append(host)
|
||||
@ -462,7 +462,7 @@ class HostManager(object):
|
||||
|
||||
def _match_forced_hosts(host_map, hosts_to_force):
|
||||
forced_hosts = []
|
||||
for (hostname, nodename) in host_map.keys():
|
||||
for (hostname, nodename) in list(host_map.keys()):
|
||||
if hostname not in hosts_to_force:
|
||||
del host_map[(hostname, nodename)]
|
||||
else:
|
||||
@ -478,7 +478,7 @@ class HostManager(object):
|
||||
|
||||
def _match_forced_nodes(host_map, nodes_to_force):
|
||||
forced_nodes = []
|
||||
for (hostname, nodename) in host_map.keys():
|
||||
for (hostname, nodename) in list(host_map.keys()):
|
||||
if nodename not in nodes_to_force:
|
||||
del host_map[(hostname, nodename)]
|
||||
else:
|
||||
|
@ -295,7 +295,7 @@ class TestCase(testtools.TestCase):
|
||||
observed = jsonutils.loads(observed)
|
||||
|
||||
def sort_key(x):
|
||||
if isinstance(x, set) or isinstance(x, datetime.datetime):
|
||||
if isinstance(x, (set, list)) or isinstance(x, datetime.datetime):
|
||||
return str(x)
|
||||
if isinstance(x, dict):
|
||||
items = ((sort_key(key), sort_key(value))
|
||||
|
@ -23,6 +23,7 @@ from nova.scheduler import filter_scheduler
|
||||
from nova.scheduler import host_manager
|
||||
from nova.scheduler import utils as scheduler_utils
|
||||
from nova.scheduler import weights
|
||||
from nova import test # noqa
|
||||
from nova.tests.unit.scheduler import fakes
|
||||
from nova.tests.unit.scheduler import test_scheduler
|
||||
|
||||
|
@ -17,9 +17,9 @@ Tests For PickledScheduler.
|
||||
"""
|
||||
|
||||
import datetime
|
||||
import StringIO
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
import six
|
||||
|
||||
from nova.scheduler import scheduler_options
|
||||
from nova import test
|
||||
@ -45,7 +45,9 @@ class FakeSchedulerOptions(scheduler_options.SchedulerOptions):
|
||||
|
||||
def _get_file_handle(self, filename):
|
||||
self.file_was_loaded = True
|
||||
return StringIO.StringIO(self._file_data)
|
||||
if six.PY3:
|
||||
return six.BytesIO(self._file_data.encode('utf-8'))
|
||||
return six.StringIO(self._file_data)
|
||||
|
||||
def _get_time_now(self):
|
||||
return self._time_now
|
||||
|
@ -15,12 +15,12 @@
|
||||
"""
|
||||
Tests For Scheduler Utils
|
||||
"""
|
||||
import contextlib
|
||||
import uuid
|
||||
|
||||
import mock
|
||||
from mox3 import mox
|
||||
from oslo_config import cfg
|
||||
import six
|
||||
|
||||
from nova.compute import flavors
|
||||
from nova.compute import utils as compute_utils
|
||||
@ -197,7 +197,7 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
|
||||
scheduler_utils.populate_retry,
|
||||
filter_properties, 'fake-uuid')
|
||||
# make sure 'msg' is a substring of the complete exception text
|
||||
self.assertIn(msg, nvh.message)
|
||||
self.assertIn(msg, six.text_type(nvh))
|
||||
|
||||
def _check_parse_options(self, opts, sep, converter, expected):
|
||||
good = scheduler_utils.parse_options(opts,
|
||||
@ -248,7 +248,7 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
|
||||
def _get_group_details(self, group, policy=None):
|
||||
group_hosts = ['hostB']
|
||||
|
||||
with contextlib.nested(
|
||||
with test.nested(
|
||||
mock.patch.object(objects.InstanceGroup, 'get_by_instance_uuid',
|
||||
return_value=group),
|
||||
mock.patch.object(objects.InstanceGroup, 'get_hosts',
|
||||
@ -297,7 +297,7 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
|
||||
group.members = [instance.uuid]
|
||||
group.policies = [policy]
|
||||
|
||||
with contextlib.nested(
|
||||
with test.nested(
|
||||
mock.patch.object(objects.InstanceGroup, 'get_by_instance_uuid',
|
||||
return_value=group),
|
||||
mock.patch.object(objects.InstanceGroup, 'get_hosts',
|
||||
|
42
tox.ini
42
tox.ini
@ -38,6 +38,48 @@ commands =
|
||||
python -m testtools.run \
|
||||
nova.tests.unit.compute.test_keypairs \
|
||||
nova.tests.unit.db.test_db_api \
|
||||
nova.tests.unit.scheduler.filters.test_affinity_filters \
|
||||
nova.tests.unit.scheduler.filters.test_aggregate_image_properties_isolation_filters \
|
||||
nova.tests.unit.scheduler.filters.test_aggregate_instance_extra_specs_filters \
|
||||
nova.tests.unit.scheduler.filters.test_aggregate_multitenancy_isolation_filters \
|
||||
nova.tests.unit.scheduler.filters.test_availability_zone_filters \
|
||||
nova.tests.unit.scheduler.filters.test_compute_capabilities_filters \
|
||||
nova.tests.unit.scheduler.filters.test_compute_filters \
|
||||
nova.tests.unit.scheduler.filters.test_core_filters \
|
||||
nova.tests.unit.scheduler.filters.test_disk_filters \
|
||||
nova.tests.unit.scheduler.filters.test_exact_core_filter \
|
||||
nova.tests.unit.scheduler.filters.test_exact_disk_filter \
|
||||
nova.tests.unit.scheduler.filters.test_exact_ram_filter \
|
||||
nova.tests.unit.scheduler.filters.test_extra_specs_ops \
|
||||
nova.tests.unit.scheduler.filters.test_image_props_filters \
|
||||
nova.tests.unit.scheduler.filters.test_io_ops_filters \
|
||||
nova.tests.unit.scheduler.filters.test_isolated_hosts_filter \
|
||||
nova.tests.unit.scheduler.filters.test_json_filters \
|
||||
nova.tests.unit.scheduler.filters.test_metrics_filters \
|
||||
nova.tests.unit.scheduler.filters.test_num_instances_filters \
|
||||
nova.tests.unit.scheduler.filters.test_numa_topology_filters \
|
||||
nova.tests.unit.scheduler.filters.test_pci_passthrough_filters \
|
||||
nova.tests.unit.scheduler.filters.test_ram_filters \
|
||||
nova.tests.unit.scheduler.filters.test_retry_filters \
|
||||
nova.tests.unit.scheduler.filters.test_trusted_filters \
|
||||
nova.tests.unit.scheduler.filters.test_type_filters \
|
||||
nova.tests.unit.scheduler.filters.test_utils \
|
||||
nova.tests.unit.scheduler.test_caching_scheduler \
|
||||
nova.tests.unit.scheduler.test_chance_scheduler \
|
||||
nova.tests.unit.scheduler.test_client \
|
||||
nova.tests.unit.scheduler.test_filter_scheduler \
|
||||
nova.tests.unit.scheduler.test_filters \
|
||||
nova.tests.unit.scheduler.test_host_filters \
|
||||
nova.tests.unit.scheduler.test_host_manager \
|
||||
nova.tests.unit.scheduler.test_ironic_host_manager \
|
||||
nova.tests.unit.scheduler.test_rpcapi \
|
||||
nova.tests.unit.scheduler.test_scheduler \
|
||||
nova.tests.unit.scheduler.test_scheduler_options \
|
||||
nova.tests.unit.scheduler.test_scheduler_utils \
|
||||
nova.tests.unit.scheduler.weights.test_weights_hosts \
|
||||
nova.tests.unit.scheduler.weights.test_weights_ioopsweight \
|
||||
nova.tests.unit.scheduler.weights.test_weights_metrics \
|
||||
nova.tests.unit.scheduler.weights.test_weights_ram \
|
||||
nova.tests.unit.objects.test_agent \
|
||||
nova.tests.unit.objects.test_aggregate \
|
||||
nova.tests.unit.objects.test_bandwidth_usage \
|
||||
|
Loading…
x
Reference in New Issue
Block a user