[codespell] Fixes for latest version

Change-Id: I742b0b1c6e0cbfdeec40856eaae9021ceda7cb59
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
Stephen Finucane 2024-07-09 11:33:07 +01:00
parent ed2bf3699d
commit 0db5dbc8ab
12 changed files with 30 additions and 28 deletions

@@ -10,4 +10,6 @@ usera
dettach
excpt
imigration
childs
childs
assertin
notin

@@ -1556,7 +1556,7 @@ class ResourceTracker(object):
LOG.debug('Migration instance not found: %s', e)
continue
# Skip migation if instance is neither in a resize state nor is
# Skip migration if instance is neither in a resize state nor is
# live-migrating.
if (not _instance_in_resize_state(instances[uuid]) and not
_instance_is_live_migrating(instances[uuid])):

@@ -171,12 +171,12 @@ class HostMappingList(base.ObjectListBase, base.NovaObject):
query = query.filter(api_models.HostMapping.cell_id == cell_id)
return query.all()
@ base.remotable_classmethod
@base.remotable_classmethod
def get_by_cell_id(cls, context, cell_id):
db_mappings = cls._get_from_db(context, cell_id)
return base.obj_make_list(context, cls(), HostMapping, db_mappings)
@ base.remotable_classmethod
@base.remotable_classmethod
def get_all(cls, context):
db_mappings = cls._get_from_db(context)
return base.obj_make_list(context, cls(), HostMapping, db_mappings)

@@ -1227,7 +1227,7 @@ def _server_group_count_members_by_user_legacy(context, group, user_id):
found = greenthread.wait()
instances = instances + found
# Count build requests using the same filters to catch group members
# that are not yet creatd in a cell.
# that are not yet created in a cell.
# NOTE(mriedem): BuildRequestList.get_by_filters is not very efficient for
# what we need and we can optimize this with a new query method.
build_requests = objects.BuildRequestList.get_by_filters(context, filters)

@@ -1361,7 +1361,7 @@ class Domain(object):
else:
# FIXME(sean-k-mooney): We don't currently handle attaching
# or detaching hostdevs but we have tests that assume we do so
# this is an error not an exception. This affects PCI passthough,
# this is an error not an exception. This affects PCI passthrough,
# vGPUs and PF neutron ports.
LOG.error(
"Trying to attach an unsupported device type."
@@ -1412,7 +1412,7 @@ class Domain(object):
# FIXME(sean-k-mooney): We don't currently handle attaching or
# detaching hostdevs but we have tests that assume we do so this is
# an error not an exception. This affects PCI passthough, vGPUs and
# an error not an exception. This affects PCI passthrough, vGPUs and
# PF neutron ports
LOG.error(
"Trying to detach an unsupported device type."

@@ -95,7 +95,7 @@ class NUMALiveMigrationBase(
class NUMALiveMigrationPositiveBase(NUMALiveMigrationBase):
"""Base for all tests that expect the live migration to actually start.
Sets up an "environment" with two computes, each with 4 CPUs spead evenly
Sets up an "environment" with two computes, each with 4 CPUs spread evenly
across 2 NUMA nodes.
"""

@@ -26,7 +26,7 @@ class IgnoreDeletedServerGroupsTest(
"""Regression test for bug 1890244
If instance are created as member of server groups it
should be possibel to evacuate them if the server groups are
should be possible to evacuate them if the server groups are
deleted prior to the host failure.
"""

@@ -69,12 +69,12 @@ class Bug1995153RegressionTest(
def test_socket_policy_bug_1995153(self):
"""Previously, the numa_usage_from_instance_numa() method in
hardware.py saved the host NUMAToplogy object with NUMACells that have
hardware.py saved the host NUMATopology object with NUMACells that have
no `socket` set. This was an omission in the original implementation of
the `socket` PCI NUMA affinity policy. The consequence was that any
code path that called into numa_usage_from_instance_numa() would
clobber the host NUMA topology in the database with a socket-less
version. Booting an instance with NUMA toplogy would do that, for
version. Booting an instance with NUMA topology would do that, for
example. If then a second instance was booted with the `socket` PCI
NUMA affinity policy, it would read the socket-less host NUMATopology
from the database, and error out with a NotImplementedError. This was

@@ -75,20 +75,20 @@ class _TestVirtCPUModel(object):
self.assertRaises(ValueError, setattr, model, 'arch', 'foo')
def test_serialize(self):
modelin = objects.VirtCPUModel(**fake_vcpumodel_dict)
modelout = objects.VirtCPUModel.from_json(modelin.to_json())
model_in = objects.VirtCPUModel(**fake_vcpumodel_dict)
model_out = objects.VirtCPUModel.from_json(model_in.to_json())
self.assertEqual(modelin.mode, modelout.mode)
self.assertEqual(modelin.model, modelout.model)
self.assertEqual(modelin.vendor, modelout.vendor)
self.assertEqual(modelin.arch, modelout.arch)
self.assertEqual(modelin.match, modelout.match)
self.assertEqual(modelin.features[0].policy,
modelout.features[0].policy)
self.assertEqual(modelin.features[0].name, modelout.features[0].name)
self.assertEqual(modelin.topology.sockets, modelout.topology.sockets)
self.assertEqual(modelin.topology.cores, modelout.topology.cores)
self.assertEqual(modelin.topology.threads, modelout.topology.threads)
self.assertEqual(model_in.mode, model_out.mode)
self.assertEqual(model_in.model, model_out.model)
self.assertEqual(model_in.vendor, model_out.vendor)
self.assertEqual(model_in.arch, model_out.arch)
self.assertEqual(model_in.match, model_out.match)
self.assertEqual(model_in.features[0].policy,
model_out.features[0].policy)
self.assertEqual(model_in.features[0].name, model_out.features[0].name)
self.assertEqual(model_in.topology.sockets, model_out.topology.sockets)
self.assertEqual(model_in.topology.cores, model_out.topology.cores)
self.assertEqual(model_in.topology.threads, model_out.topology.threads)
class TestVirtCPUModel(test_objects._LocalTest,

@@ -11132,7 +11132,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
None,
destroy_secrets=False
)
# Assert that no attempt is made to delete the volume secert
# Assert that no attempt is made to delete the volume secret
mock_delete_secret.assert_not_called()
drvr._detach_encryptor(
@@ -11145,7 +11145,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
None,
destroy_secrets=True
)
# Assert that volume secert is deleted
# Assert that volume secret is deleted
mock_delete_secret.assert_called_once_with('volume', uuids.volume_id)
def test_allow_native_luksv1(self):

@@ -2212,7 +2212,7 @@ class LibvirtDriver(driver.ComputeDriver):
"""
# NOTE(lyarwood): Skip any attempt to fetch encryption metadata or the
# actual passphrase from the key manager if a libvirt secert already
# actual passphrase from the key manager if a libvirt secret already
# exists locally for the volume. This suggests that the instance was
# only powered off or the underlying host rebooted.
volume_id = driver_block_device.get_volume_id(connection_info)

@@ -2,7 +2,7 @@
issues:
- |
The libvirt virt driver in Nova implements power on and hard reboot by
destroying the domain first and unpluging the vifs then recreating the
destroying the domain first and unplugging the vifs then recreating the
domain and replugging the vifs. However nova does not wait for the
network-vif-plugged event before unpause the domain. This can cause
the domain to start running and requesting IP via DHCP before the