Merge "Rename CinderFixtureNewAttachFlow to CinderFixture"

This commit is contained in:
Zuul 2019-07-03 23:00:19 +00:00 committed by Gerrit Code Review
commit 7279d6fa00
19 changed files with 48 additions and 49 deletions

View File

@ -1636,8 +1636,7 @@ class EventReporterStub(fixtures.Fixture):
lambda *args, **kwargs: mock.MagicMock()))
# TODO(mriedem): Just rename this to be CinderFixture.
class CinderFixtureNewAttachFlow(fixtures.Fixture):
class CinderFixture(fixtures.Fixture):
"""A fixture to stub out volume operations with the new Cinder attach/detach API."""
# the default project_id in OSAPIFixtures
@ -1658,7 +1657,7 @@ class CinderFixtureNewAttachFlow(fixtures.Fixture):
IMAGE_WITH_TRAITS_BACKED_VOL = '6194fc02-c60e-4a01-a8e5-600798208b5f'
def __init__(self, test):
super(CinderFixtureNewAttachFlow, self).__init__()
super(CinderFixture, self).__init__()
self.test = test
self.swap_volume_instance_uuid = None
self.swap_volume_instance_error_uuid = None
@ -1684,7 +1683,7 @@ class CinderFixtureNewAttachFlow(fixtures.Fixture):
break
def setUp(self):
super(CinderFixtureNewAttachFlow, self).setUp()
super(CinderFixture, self).setUp()
def fake_get(self_api, context, volume_id, microversion=None):
# Check for the special swap volumes.

View File

@ -369,7 +369,7 @@ class ServersSampleJson267Test(ServersSampleBase):
def setUp(self):
super(ServersSampleJson267Test, self).setUp()
self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
self.useFixture(nova_fixtures.CinderFixture(self))
def test_servers_post(self):
return self._post_server(use_common_server_api_samples=False)

View File

@ -350,7 +350,7 @@ class VolumeAttachmentsSampleV249(VolumeAttachmentsSample):
def setUp(self):
super(VolumeAttachmentsSampleV249, self).setUp()
self.useFixture(fixtures.CinderFixtureNewAttachFlow(self))
self.useFixture(fixtures.CinderFixture(self))
def _get_vol_attachment_subs(self, subs):
return dict(subs, tag='foo')

View File

@ -54,7 +54,7 @@ class LiveMigrationCinderFailure(integrated_helpers._IntegratedTestBase,
self.compute2 = self.start_service('compute', host='host2')
def test_live_migrate_attachment_delete_fails(self):
self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
self.useFixture(nova_fixtures.CinderFixture(self))
server = self.api.post_server({
'server': {
'flavorRef': 1,

View File

@ -33,7 +33,7 @@ class TestInstanceNotificationSampleWithMultipleCompute(
super(TestInstanceNotificationSampleWithMultipleCompute, self).setUp()
self.neutron = fixtures.NeutronFixture(self)
self.useFixture(self.neutron)
self.cinder = fixtures.CinderFixtureNewAttachFlow(self)
self.cinder = fixtures.CinderFixture(self)
self.useFixture(self.cinder)
self.useFixture(fixtures.AllServicesCurrent())
@ -326,7 +326,7 @@ class TestInstanceNotificationSample(
super(TestInstanceNotificationSample, self).setUp()
self.neutron = fixtures.NeutronFixture(self)
self.useFixture(self.neutron)
self.cinder = fixtures.CinderFixtureNewAttachFlow(self)
self.cinder = fixtures.CinderFixture(self)
self.useFixture(self.cinder)
def _wait_until_swap_volume(self, server, volume_id):

View File

@ -28,7 +28,7 @@ class TestVolumeUsageNotificationSample(
super(TestVolumeUsageNotificationSample, self).setUp()
self.neutron = fixtures.NeutronFixture(self)
self.useFixture(self.neutron)
self.cinder = fixtures.CinderFixtureNewAttachFlow(self)
self.cinder = fixtures.CinderFixture(self)
self.useFixture(self.cinder)
def _setup_server_with_volume_attached(self):

View File

@ -59,11 +59,11 @@ class DeleteWithReservedVolumes(integrated_helpers._IntegratedTestBase,
def test_delete_with_reserved_volumes_new(self):
self.cinder = self.useFixture(
nova_fixtures.CinderFixtureNewAttachFlow(self))
nova_fixtures.CinderFixture(self))
# Create a server which should go to ERROR state because we don't
# have any active computes.
volume_id = nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL
volume_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
server = self._create_error_server(volume_id)
server_id = server['id']

View File

@ -46,7 +46,7 @@ class TestLocalDeleteAttachedVolumes(test.TestCase):
self.useFixture(policy_fixture.RealPolicyFixture())
# We need the CinderFixture to stub out the volume API.
self.cinder = self.useFixture(
nova_fixtures.CinderFixtureNewAttachFlow(self))
nova_fixtures.CinderFixture(self))
# The NeutronFixture is needed to stub out validate_networks in API.
self.useFixture(nova_fixtures.NeutronFixture(self))
# Use the PlacementFixture to avoid annoying warnings in the logs.

View File

@ -34,7 +34,7 @@ class RebuildVolumeBackedSameImage(integrated_helpers._IntegratedTestBase,
def setUp(self):
super(RebuildVolumeBackedSameImage, self).setUp()
# We are creating a volume-backed server so we need the CinderFixture.
self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
self.useFixture(nova_fixtures.CinderFixture(self))
def _setup_scheduler_service(self):
# Add the IsolatedHostsFilter to the list of enabled filters since it
@ -46,7 +46,7 @@ class RebuildVolumeBackedSameImage(integrated_helpers._IntegratedTestBase,
def test_volume_backed_rebuild_same_image(self):
# First create our server as normal.
volume_id = nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL
volume_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
server_req_body = {
# There is no imageRef because this is boot from volume.
'server': {

View File

@ -39,7 +39,7 @@ class TestBootFromVolumeIsolatedHostsFilter(
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
self.useFixture(nova_fixtures.CinderFixture(self))
self.useFixture(func_fixtures.PlacementFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
@ -71,7 +71,7 @@ class TestBootFromVolumeIsolatedHostsFilter(
def test_boot_from_volume_with_isolated_image(self):
# Create our server without networking just to keep things simple.
image_id = nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL
image_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
server_req_body = {
# There is no imageRef because this is boot from volume.
'server': {

View File

@ -32,7 +32,7 @@ class TestRescheduleWithVolumesAttached(
super(TestRescheduleWithVolumesAttached, self).setUp()
# Use the new attach flow fixture for cinder
cinder_fixture = nova_fixtures.CinderFixtureNewAttachFlow(self)
cinder_fixture = nova_fixtures.CinderFixture(self)
self.cinder = self.useFixture(cinder_fixture)
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
@ -63,7 +63,7 @@ class TestRescheduleWithVolumesAttached(
def test_reschedule_with_volume_attached(self):
# Boot a volume backed instance
volume_id = nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL
volume_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
server_request = {
'name': 'server',
'flavorRef': self.flavor_id,

View File

@ -47,7 +47,7 @@ class BootFromVolumeOverQuotaRaceDeleteTest(
super(BootFromVolumeOverQuotaRaceDeleteTest, self).setUp()
# We need the cinder fixture for boot from volume testing.
self.cinder_fixture = self.useFixture(
nova_fixtures.CinderFixtureNewAttachFlow(self))
nova_fixtures.CinderFixture(self))
# Use the standard fixtures.
self.useFixture(policy_fixture.RealPolicyFixture())
self.useFixture(nova_fixtures.NeutronFixture(self))
@ -66,7 +66,7 @@ class BootFromVolumeOverQuotaRaceDeleteTest(
# volume attachment record for the given pre-existing volume.
# We also tag the server since tags, like BDMs, should be created in
# the cell database along with the instance.
volume_id = nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL
volume_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
server = {
'server': {
'name': 'test_bfv_quota_race_local_delete',

View File

@ -36,7 +36,7 @@ class VolumeBackedResizeDiskDown(test.TestCase,
self.api = api_fixture.admin_api
self.useFixture(nova_fixtures.NeutronFixture(self))
self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
self.useFixture(nova_fixtures.CinderFixture(self))
self.useFixture(func_fixtures.PlacementFixture())
fake_image.stub_out_image_service(self)
self.addCleanup(fake_image.FakeImageService_reset)
@ -54,7 +54,7 @@ class VolumeBackedResizeDiskDown(test.TestCase,
flavor2 = flavors[1]
self.assertGreater(flavor2['disk'], flavor1['disk'])
vol_id = nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL
vol_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
server = {
'name': 'test_volume_backed_resize_disk_down',
'imageRef': '',

View File

@ -46,7 +46,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin,
self.flags(allow_resize_to_same_host=True)
super(BootFromVolumeTest, self).setUp()
self.admin_api = self.api_fixture.admin_api
self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
self.useFixture(nova_fixtures.CinderFixture(self))
def test_boot_from_volume_larger_than_local_gb(self):
# Verify no local disk is being used currently
@ -62,7 +62,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin,
# disk. It should succeed for boot from volume.
server = self._build_server(flavor_id)
server['imageRef'] = ''
volume_uuid = nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL
volume_uuid = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
bdm = {'boot_index': 0,
'uuid': volume_uuid,
'source_type': 'volume',
@ -132,7 +132,7 @@ class BootFromVolumeTest(integrated_helpers.InstanceHelperMixin,
self._verify_request_spec_flavor_not_zero(server_id)
# Rebuild
# The image_uuid is from CinderFixtureNewAttachFlow for the
# The image_uuid is from CinderFixture for the
# volume representing IMAGE_BACKED_VOL.
image_uuid = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
post_data = {'rebuild': {'imageRef': image_uuid}}

View File

@ -25,7 +25,7 @@ class ConfigurableMaxDiskDevicesTest(integrated_helpers.InstanceHelperMixin,
def setUp(self):
super(ConfigurableMaxDiskDevicesTest, self).setUp()
self.cinder = self.useFixture(
nova_fixtures.CinderFixtureNewAttachFlow(self))
nova_fixtures.CinderFixture(self))
def _wait_for_volume_attach(self, server_id, volume_id):
for i in range(0, 100):
@ -44,7 +44,7 @@ class ConfigurableMaxDiskDevicesTest(integrated_helpers.InstanceHelperMixin,
self.flags(max_disk_devices_to_attach=1, group='compute')
server = self._build_server(flavor_id='1')
server['imageRef'] = ''
volume_uuid = nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL
volume_uuid = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
bdm = {'boot_index': 0,
'uuid': volume_uuid,
'source_type': 'volume',
@ -59,7 +59,7 @@ class ConfigurableMaxDiskDevicesTest(integrated_helpers.InstanceHelperMixin,
self.flags(max_disk_devices_to_attach=1, group='compute')
server = self._build_server(flavor_id='1')
server['imageRef'] = ''
vol_img_id = nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL
vol_img_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
boot_vol = {'boot_index': 0,
'uuid': vol_img_id,
'source_type': 'volume',

View File

@ -20,7 +20,7 @@ class TestMultiattachVolumes(integrated_helpers._IntegratedTestBase,
"""Functional tests for creating a server from a multiattach volume
and attaching a multiattach volume to a server.
Uses the CinderFixtureNewAttachFlow fixture with a specific volume ID
Uses the CinderFixture fixture with a specific volume ID
to represent a multiattach volume.
"""
# These are all used in _IntegratedTestBase.
@ -35,13 +35,13 @@ class TestMultiattachVolumes(integrated_helpers._IntegratedTestBase,
# multiattach.
self.useFixture(nova_fixtures.AllServicesCurrent())
super(TestMultiattachVolumes, self).setUp()
self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
self.useFixture(nova_fixtures.CinderFixture(self))
def test_boot_from_volume_and_attach_to_second_server(self):
"""This scenario creates a server from the multiattach volume, waits
for it to be ACTIVE, and then attaches the volume to another server.
"""
volume_id = nova_fixtures.CinderFixtureNewAttachFlow.MULTIATTACH_VOL
volume_id = nova_fixtures.CinderFixture.MULTIATTACH_VOL
create_req = self._build_server(flavor_id='1', image='')
create_req['networks'] = 'none'
create_req['block_device_mapping_v2'] = [{

View File

@ -373,7 +373,7 @@ class TestNovaManagePlacementHealAllocations(
def setUp(self):
super(TestNovaManagePlacementHealAllocations, self).setUp()
self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
self.useFixture(nova_fixtures.CinderFixture(self))
self.cli = manage.PlacementCommands()
# We need to start a compute in each non-cell0 cell.
for cell_name, cell_mapping in self.cell_mappings.items():
@ -410,7 +410,7 @@ class TestNovaManagePlacementHealAllocations(
networks='none')
server_req['availability_zone'] = 'nova:%s' % hostname
if volume_backed:
vol_id = nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL
vol_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
server_req['block_device_mapping_v2'] = [{
'source_type': 'volume',
'destination_type': 'volume',

View File

@ -858,7 +858,7 @@ class ServersTest(ServersTestBase):
self.assertEqual(403, ex.response.status_code)
def test_attach_vol_maximum_disk_devices_exceeded(self):
self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
self.useFixture(nova_fixtures.CinderFixture(self))
server = self._build_minimal_create_server_request()
created_server = self.api.post_server({"server": server})
@ -1583,7 +1583,7 @@ class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase,
different image than what is in the root disk of the root volume
will result in a 400 BadRequest error.
"""
self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
self.useFixture(nova_fixtures.CinderFixture(self))
# First create our server as normal.
server_req_body = {
# There is no imageRef because this is boot from volume.
@ -1596,7 +1596,7 @@ class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase,
'block_device_mapping_v2': [{
'boot_index': 0,
'uuid':
nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL,
nova_fixtures.CinderFixture.IMAGE_BACKED_VOL,
'source_type': 'volume',
'destination_type': 'volume'
}]
@ -3703,8 +3703,8 @@ class VolumeBackedServerTest(integrated_helpers.ProviderUsageBaseTestCase):
return server
def _create_volume_backed_server(self):
self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
volume_id = nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL
self.useFixture(nova_fixtures.CinderFixture(self))
volume_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
server_req_body = {
# There is no imageRef because this is boot from volume.
'server': {
@ -4013,12 +4013,12 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase):
rp_uuid = self._get_provider_uuid_by_host(self.compute2.host)
self._set_provider_traits(rp_uuid, ['HW_CPU_X86_SGX'])
self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
self.useFixture(nova_fixtures.CinderFixture(self))
# Create our server with a volume containing the image meta data with a
# required trait
server = self._create_volume_backed_server_with_traits(
self.flavor_without_trait['id'],
nova_fixtures.CinderFixtureNewAttachFlow.
nova_fixtures.CinderFixture.
IMAGE_WITH_TRAITS_BACKED_VOL)
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
@ -4037,12 +4037,12 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase):
self._set_provider_traits(rp_uuid, ['HW_CPU_X86_VMX',
'HW_CPU_X86_SGX'])
self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
self.useFixture(nova_fixtures.CinderFixture(self))
# Create our server with a flavor trait and a volume containing the
# image meta data with a required trait
server = self._create_volume_backed_server_with_traits(
self.flavor_with_trait['id'],
nova_fixtures.CinderFixtureNewAttachFlow.
nova_fixtures.CinderFixture.
IMAGE_WITH_TRAITS_BACKED_VOL)
server = self._wait_for_state_change(self.admin_api, server, 'ACTIVE')
@ -4109,11 +4109,11 @@ class TraitsBasedSchedulingTest(integrated_helpers.ProviderUsageBaseTestCase):
fails to find a valid host since no compute node resource providers
have the trait.
"""
self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
self.useFixture(nova_fixtures.CinderFixture(self))
# Create our server with a volume
server = self._create_volume_backed_server_with_traits(
self.flavor_without_trait['id'],
nova_fixtures.CinderFixtureNewAttachFlow.
nova_fixtures.CinderFixture.
IMAGE_WITH_TRAITS_BACKED_VOL)
# The server should go to ERROR state because there is no valid host.

View File

@ -248,12 +248,12 @@ class ServersPreSchedulingTestCase(test.TestCase,
def test_bfv_delete_build_request_pre_scheduling(self):
cinder = self.useFixture(
nova_fixtures.CinderFixtureNewAttachFlow(self))
nova_fixtures.CinderFixture(self))
# This makes the get_minimum_version_all_cells check say we're running
# the latest of everything.
self.useFixture(nova_fixtures.AllServicesCurrent())
volume_id = nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL
volume_id = nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
server = self.api.post_server({
'server': {
'flavorRef': '1',
@ -372,7 +372,7 @@ class EnforceVolumeBackedForZeroDiskFlavorTestCase(
# we don't start compute so that scheduling fails; we don't really
# care about successfully building an active server here.
self.useFixture(func_fixtures.PlacementFixture())
self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
self.useFixture(nova_fixtures.CinderFixture(self))
self.start_service('conductor')
self.start_service('scheduler')
server_req = self._build_minimal_create_server_request(
@ -381,7 +381,7 @@ class EnforceVolumeBackedForZeroDiskFlavorTestCase(
flavor_id=self.zero_disk_flavor['id'])
server_req.pop('imageRef', None)
server_req['block_device_mapping_v2'] = [{
'uuid': nova_fixtures.CinderFixtureNewAttachFlow.IMAGE_BACKED_VOL,
'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL,
'source_type': 'volume',
'destination_type': 'volume',
'boot_index': 0