Merge "Default AZ for instance if cross_az_attach=False and checking from API"
commit 46a02d5eb5
@@ -133,6 +133,14 @@ With respect to availability zones, a server is restricted to a zone if:

   ``availability_zone`` with the ``POST /servers/{server_id}/action`` request
   using microversion 2.77 or greater.

+4. :oslo.config:option:`cinder.cross_az_attach` is False,
+   :oslo.config:option:`default_schedule_zone` is None,
+   the server is created without an explicit zone but with pre-existing volume
+   block device mappings. In that case the server will be created in the same
+   zone as the volume(s) if the volume zone is not the same as
+   :oslo.config:option:`default_availability_zone`. See `Resource affinity`_
+   for details.
+
If the server was not created in a specific zone then it is free to be moved
to other zones, i.e. the :ref:`AvailabilityZoneFilter <AvailabilityZoneFilter>`
is a no-op.
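Case 4 is the behavior added by this change. As a quick illustration, here is a minimal self-contained sketch of that decision; the function and its parameters are hypothetical stand-ins, not nova's actual API:

```python
# Hypothetical sketch of case 4 above; not nova's actual API.
def derive_zone(requested_zone, default_schedule_zone,
                default_availability_zone, volume_zone):
    """Return the zone to pin the new server to, or None to leave it free."""
    zone = requested_zone or default_schedule_zone
    if zone is None and volume_zone != default_availability_zone:
        # No explicit zone was requested, so follow the pre-existing volume.
        return volume_zone
    return zone

# No zone requested, volume lives in "az1", compute default is "nova":
assert derive_zone(None, None, 'nova', 'az1') == 'az1'
# Volume in the default zone: the server stays unpinned.
assert derive_zone(None, None, 'nova', 'nova') is None
```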
@@ -174,7 +182,12 @@ a server to another zone could also break affinity with attached volumes.

``cross_az_attach=False`` is not widely used nor tested extensively and
thus suffers from some known issues:

-* `Bug 1694844 <https://bugs.launchpad.net/nova/+bug/1694844>`_
+* `Bug 1694844 <https://bugs.launchpad.net/nova/+bug/1694844>`_. This is
+  fixed in the 21.0.0 (Ussuri) release by using the volume zone for the
+  server being created if the server is created without an explicit zone,
+  :oslo.config:option:`default_schedule_zone` is None, and the volume zone
+  does not match the value of
+  :oslo.config:option:`default_availability_zone`.
* `Bug 1781421 <https://bugs.launchpad.net/nova/+bug/1781421>`_
@@ -737,6 +737,7 @@ class ServersController(wsgi.Controller):
                exception.FlavorMemoryTooSmall,
                exception.InvalidMetadata,
                exception.InvalidVolume,
+               exception.MismatchVolumeAZException,
                exception.MultiplePortsNotApplicable,
                exception.InvalidFixedIpAndMaxCountRequest,
                exception.InstanceUserDataMalformed,
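Adding ``MismatchVolumeAZException`` to this tuple is what maps the new compute-layer error to a 400 response. A self-contained sketch of the translation pattern follows; the exception class here is a stand-in for nova's, and only the webob usage mirrors the real handler:

```python
# Simplified sketch: how an exception from the tuple above becomes a 400.
import webob.exc


class MismatchVolumeAZException(Exception):
    """Stand-in for nova.exception.MismatchVolumeAZException."""
    def format_message(self):
        return 'Server and volumes are not in the same availability zone'


try:
    raise MismatchVolumeAZException()
except MismatchVolumeAZException as error:
    resp = webob.exc.HTTPBadRequest(explanation=error.format_message())
    print(resp.status)  # 400 Bad Request
```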
@@ -1031,6 +1031,104 @@ class API(base.Base):
        except exception.ResourceProviderNotFound:
            raise exception.ComputeHostNotFound(host=hypervisor_hostname)

+    def _get_volumes_for_bdms(self, context, bdms):
+        """Get the pre-existing volumes from cinder for the list of BDMs.
+
+        :param context: nova auth RequestContext
+        :param bdms: BlockDeviceMappingList which has zero or more BDMs with
+            a pre-existing volume_id specified.
+        :return: dict, keyed by volume id, of volume dicts
+        :raises: VolumeNotFound - if a given volume does not exist
+        :raises: CinderConnectionFailed - if there are problems communicating
+            with the cinder API
+        :raises: Forbidden - if the user token does not have authority to see
+            a volume
+        """
+        volumes = {}
+        for bdm in bdms:
+            if bdm.volume_id:
+                volumes[bdm.volume_id] = self.volume_api.get(
+                    context, bdm.volume_id)
+        return volumes
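A self-contained model of what this helper does, with plain-Python stand-ins for the BDM objects and the cinder client (illustrative only):

```python
# Plain-Python model of _get_volumes_for_bdms: only BDMs carrying a
# pre-existing volume_id trigger a cinder GET; results are keyed by id.
class FakeBDM:
    def __init__(self, volume_id=None):
        self.volume_id = volume_id


def fake_cinder_get(volume_id):
    # Stand-in for self.volume_api.get(context, volume_id).
    return {'id': volume_id, 'availability_zone': 'az1', 'size': 1}


def get_volumes_for_bdms(bdms):
    volumes = {}
    for bdm in bdms:
        if bdm.volume_id:
            volumes[bdm.volume_id] = fake_cinder_get(bdm.volume_id)
    return volumes


print(get_volumes_for_bdms([FakeBDM(), FakeBDM(volume_id='vol-1')]))
# {'vol-1': {'id': 'vol-1', 'availability_zone': 'az1', 'size': 1}}
```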
+    @staticmethod
+    def _validate_vol_az_for_create(instance_az, volumes):
+        """Performs cross_az_attach validation for the instance and volumes.
+
+        If [cinder]/cross_az_attach=True (default) this method is a no-op.
+
+        If [cinder]/cross_az_attach=False, this method will validate that:
+
+        1. All volumes are in the same availability zone.
+        2. The volume AZ matches the instance AZ. If the instance is being
+           created without a specific AZ (either via the user request or the
+           [DEFAULT]/default_schedule_zone option), and the volume AZ matches
+           [DEFAULT]/default_availability_zone for compute services, then the
+           method returns the volume AZ so it can be set in the RequestSpec as
+           if the user requested the zone explicitly.
+
+        :param instance_az: Availability zone for the instance. In this case
+            the host is not yet selected so the instance AZ value should come
+            from one of the following cases:
+
+            * The user requested availability zone.
+            * [DEFAULT]/default_schedule_zone (defaults to None) if the request
+              does not specify an AZ (see parse_availability_zone).
+        :param volumes: iterable of dicts of cinder volumes to be attached to
+            the server being created
+        :returns: None or volume AZ to set in the RequestSpec for the instance
+        :raises: MismatchVolumeAZException if the instance and volume AZ do
+            not match
+        """
+        if CONF.cinder.cross_az_attach:
+            return
+
+        if not volumes:
+            return
+
+        # First make sure that all of the volumes are in the same zone.
+        vol_zones = [vol['availability_zone'] for vol in volumes]
+        if len(set(vol_zones)) > 1:
+            msg = (_("Volumes are in different availability zones: %s")
+                   % ','.join(vol_zones))
+            raise exception.MismatchVolumeAZException(reason=msg)
+
+        volume_az = vol_zones[0]
+        # In this case the instance.host should not be set so the instance AZ
+        # value should come from instance.availability_zone which will be one
+        # of the following cases:
+        # * The user requested availability zone.
+        # * [DEFAULT]/default_schedule_zone (defaults to None) if the request
+        #   does not specify an AZ (see parse_availability_zone).
+
+        # If the instance is not being created with a specific AZ (the AZ is
+        # input via the API create request *or* [DEFAULT]/default_schedule_zone
+        # is not None), then check to see if we should use the default AZ
+        # (which by default matches the default AZ in Cinder, i.e. 'nova').
+        if instance_az is None:
+            # Check if the volume AZ is the same as our default AZ for compute
+            # hosts (nova) and if so, assume we are OK because the user did not
+            # request an AZ and will get the same default. If the volume AZ is
+            # not the same as our default, return the volume AZ so the caller
+            # can put it into the request spec so the instance is scheduled
+            # to the same zone as the volume. Note that we are paranoid about
+            # the default here since both nova and cinder's default backend AZ
+            # is "nova" and we do not want to pin the server to that AZ since
+            # it's special, i.e. just like we tell users in the docs to not
+            # specify availability_zone='nova' when creating a server since we
+            # might not be able to migrate it later.
+            if volume_az != CONF.default_availability_zone:
+                return volume_az  # indication to set in request spec
+            # The volume AZ is the same as the default nova AZ so we will be OK
+            return
+
+        if instance_az != volume_az:
+            msg = _("Server and volumes are not in the same availability "
+                    "zone. Server is in: %(instance_az)s. Volumes are in: "
+                    "%(volume_az)s") % {
+                'instance_az': instance_az, 'volume_az': volume_az}
+            raise exception.MismatchVolumeAZException(reason=msg)
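To summarize the branches above, this is a self-contained restatement of the validation logic with example calls; the config values are passed as plain arguments instead of being read from CONF:

```python
# Restatement of _validate_vol_az_for_create's branches for illustration.
def validate_vol_az(instance_az, vol_zones, cross_az_attach, default_az):
    if cross_az_attach or not vol_zones:
        return None  # nothing to validate
    if len(set(vol_zones)) > 1:
        raise ValueError('Volumes are in different availability zones')
    volume_az = vol_zones[0]
    if instance_az is None:
        # Pin to the volume zone only when it differs from the default.
        return volume_az if volume_az != default_az else None
    if instance_az != volume_az:
        raise ValueError('Server and volumes are not in the same zone')
    return None


assert validate_vol_az(None, ['nova'], False, 'nova') is None
assert validate_vol_az(None, ['az1'], False, 'nova') == 'az1'
assert validate_vol_az('az1', ['az1'], False, 'nova') is None
```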
    def _provision_instances(self, context, instance_type, min_count,
            max_count, base_options, boot_meta, security_groups,
            block_device_mapping, shutdown_terminate,
@@ -1056,12 +1154,23 @@ class API(base.Base):
                security_groups)
        self.security_group_api.ensure_default(context)
        port_resource_requests = base_options.pop('port_resource_requests')
-        LOG.debug("Going to run %s instances...", num_instances)
        instances_to_build = []
        # We could be iterating over several instances with several BDMs per
        # instance and those BDMs could be using a lot of the same images so
        # we want to cache the image API GET results for performance.
        image_cache = {}  # dict of image dicts keyed by image id
+        # Before processing the list of instances get all of the requested
+        # pre-existing volumes so we can do some validation here rather than
+        # down in the bowels of _validate_bdm.
+        volumes = self._get_volumes_for_bdms(context, block_device_mapping)
+        volume_az = self._validate_vol_az_for_create(
+            base_options['availability_zone'], volumes.values())
+        if volume_az:
+            # This means the instance is not being created in a specific zone
+            # but needs to match the zone that the volumes are in so update
+            # base_options to match the volume zone.
+            base_options['availability_zone'] = volume_az
+        LOG.debug("Going to run %s instances...", num_instances)
        try:
            for i in range(num_instances):
                # Create a uuid for the instance so we can store the
@@ -1115,7 +1224,7 @@ class API(base.Base):
                block_device_mapping = (
                    self._bdm_validate_set_size_and_instance(context,
                        instance, instance_type, block_device_mapping,
-                        image_cache, supports_multiattach))
+                        image_cache, volumes, supports_multiattach))
                instance_tags = self._transform_tags(tags, instance.uuid)

                build_request = objects.BuildRequest(context,
@@ -1471,7 +1580,7 @@ class API(base.Base):
    def _bdm_validate_set_size_and_instance(self, context, instance,
                                            instance_type,
                                            block_device_mapping,
-                                            image_cache,
+                                            image_cache, volumes,
                                            supports_multiattach=False):
        """Ensure the bdms are valid, then set size and associate with instance
@@ -1485,6 +1594,7 @@ class API(base.Base):
        :param image_cache: dict of image dicts keyed by id which is used as a
            cache in case there are multiple BDMs in the same request using
            the same image to avoid redundant GET calls to the image service
+        :param volumes: dict, keyed by volume id, of volume dicts from cinder
        :param supports_multiattach: True if the request supports multiattach
            volumes, False otherwise
        """
@@ -1492,7 +1602,7 @@ class API(base.Base):
            instance_uuid=instance.uuid)
        self._validate_bdm(
            context, instance, instance_type, block_device_mapping,
-            image_cache, supports_multiattach)
+            image_cache, volumes, supports_multiattach)
        instance_block_device_mapping = block_device_mapping.obj_clone()
        for bdm in instance_block_device_mapping:
            bdm.volume_size = self._volume_size(instance_type, bdm)
@@ -1520,7 +1630,7 @@ class API(base.Base):
                id_or_name=volume_type_id_or_name)

    def _validate_bdm(self, context, instance, instance_type,
-                      block_device_mappings, image_cache,
+                      block_device_mappings, image_cache, volumes,
                      supports_multiattach=False):
        """Validate requested block device mappings.
@@ -1531,6 +1641,7 @@ class API(base.Base):
        :param image_cache: dict of image dicts keyed by id which is used as a
            cache in case there are multiple BDMs in the same request using
            the same image to avoid redundant GET calls to the image service
+        :param volumes: dict, keyed by volume id, of volume dicts from cinder
        :param supports_multiattach: True if the request supports multiattach
            volumes, False otherwise
        """
@@ -1590,9 +1701,12 @@ class API(base.Base):
                        "size specified"))
            elif volume_id is not None:
                try:
-                    volume = self.volume_api.get(context, volume_id)
+                    volume = volumes[volume_id]
+                    # We do not validate the instance and volume AZ here
+                    # because that is done earlier by _provision_instances.
                    self._check_attach_and_reserve_volume(
-                        context, volume, instance, bdm, supports_multiattach)
+                        context, volume, instance, bdm, supports_multiattach,
+                        validate_az=False)
                    bdm.volume_size = volume.get('size')
                except (exception.CinderConnectionFailed,
                        exception.InvalidVolume,
@@ -4076,10 +4190,26 @@ class API(base.Base):
            pass

    def _check_attach_and_reserve_volume(self, context, volume, instance,
-                                         bdm, supports_multiattach=False):
+                                         bdm, supports_multiattach=False,
+                                         validate_az=True):
+        """Perform checks against the instance and volume before attaching.
+
+        If validation succeeds, the bdm is updated with an attachment_id which
+        effectively reserves it during the attach process in cinder.
+
+        :param context: nova auth RequestContext
+        :param volume: volume dict from cinder
+        :param instance: Instance object
+        :param bdm: BlockDeviceMapping object
+        :param supports_multiattach: True if the request supports multiattach
+            volumes, i.e. microversion >= 2.60, False otherwise
+        :param validate_az: True if the instance and volume availability zones
+            should be validated for cross_az_attach, False to not validate AZ
+        """
        volume_id = volume['id']
-        self.volume_api.check_availability_zone(context, volume,
-                                                instance=instance)
+        if validate_az:
+            self.volume_api.check_availability_zone(context, volume,
+                                                    instance=instance)
        # If volume.multiattach=True and the microversion to
        # support multiattach is not used, fail the request.
        if volume['multiattach'] and not supports_multiattach:
@@ -55,6 +55,10 @@ Possible values:

* Any string representing an existing availability zone name.
* None, which means that the instance can move from one availability zone to
  another during its lifetime if it is moved from one compute node to another.
+
+Related options:
+
+* ``[cinder]/cross_az_attach``
"""),
]
@@ -89,13 +89,20 @@ Allow attach between instance and volume in different availability zones.

If False, volumes attached to an instance must be in the same availability
zone in Cinder as the instance availability zone in Nova.

This also means care should be taken when booting an instance from a volume
where source is not "volume" because Nova will attempt to create a volume using
the same availability zone as what is assigned to the instance.
-If that AZ is not in Cinder (or allow_availability_zone_fallback=False in
+
+If that AZ is not in Cinder (or ``allow_availability_zone_fallback=False`` in
cinder.conf), the volume create request will fail and the instance will fail
the build request.

By default there is no availability zone restriction on volume attach.
+
+Related options:
+
+* ``[DEFAULT]/default_schedule_zone``
"""),
]
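For reference, nova consumes these options through oslo.config. A minimal sketch of the access pattern follows; it assumes a nova environment where ``nova.conf`` is importable and will not run standalone:

```python
# Sketch of how the options documented above are read at runtime.
import nova.conf

CONF = nova.conf.CONF

# [cinder]/cross_az_attach defaults to True, so the AZ validation added by
# this change is skipped unless an operator turns it off:
if not CONF.cinder.cross_az_attach:
    print(CONF.default_schedule_zone)      # [DEFAULT], defaults to None
    print(CONF.default_availability_zone)  # [DEFAULT], defaults to 'nova'
```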
@@ -43,14 +43,20 @@ class CrossAZAttachTestCase(test.TestCase,
            api_version='v2.1')).admin_api
        self.start_service('conductor')
        self.start_service('scheduler')
-        # Start one compute service.
+        # Start one compute service and add it to the AZ. This allows us to
+        # get past the AvailabilityZoneFilter and build a server.
        self.start_service('compute', host='host1')
+        agg_id = self.api.post_aggregate({'aggregate': {
+            'name': self.az, 'availability_zone': self.az}})['id']
+        self.api.api_post('/os-aggregates/%s/action' % agg_id,
+                          {'add_host': {'host': 'host1'}})

    def test_cross_az_attach_false_boot_from_volume_no_az_specified(self):
        """Tests the scenario where [cinder]/cross_az_attach=False and the
        server is created with a pre-existing volume but the server create
        request does not specify an AZ nor is [DEFAULT]/default_schedule_zone
-        set.
+        set. In this case the server is created in the zone specified by the
+        volume.
        """
        self.flags(cross_az_attach=False, group='cinder')
        server = self._build_minimal_create_server_request(
@@ -63,22 +69,16 @@ class CrossAZAttachTestCase(test.TestCase,
            'boot_index': 0,
            'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
        }]
-        # FIXME(mriedem): This is bug 1694844 where the user creates the server
-        # without specifying an AZ and there is no default_schedule_zone set
-        # and the cross_az_attach check fails because the volume's availability
-        # zone shows up as "us-central-1" and None != "us-central-1" so the API
-        # thinks the cross_az_attach=False setting was violated.
-        ex = self.assertRaises(api_client.OpenStackApiException,
-                               self.api.post_server, {'server': server})
-        self.assertEqual(400, ex.response.status_code)
-        self.assertIn('are not in the same availability_zone',
-                      six.text_type(ex))
+        server = self.api.post_server({'server': server})
+        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self.assertEqual(self.az, server['OS-EXT-AZ:availability_zone'])

    def test_cross_az_attach_false_data_volume_no_az_specified(self):
        """Tests the scenario where [cinder]/cross_az_attach=False and the
        server is created with a pre-existing volume as a non-boot data volume
        but the server create request does not specify an AZ nor is
-        [DEFAULT]/default_schedule_zone set.
+        [DEFAULT]/default_schedule_zone set. In this case the server is created
+        in the zone specified by the non-root data volume.
        """
        self.flags(cross_az_attach=False, group='cinder')
        server = self._build_minimal_create_server_request(
@@ -94,16 +94,9 @@ class CrossAZAttachTestCase(test.TestCase,
            # This is a non-bootable volume in the CinderFixture.
            'volume_id': nova_fixtures.CinderFixture.SWAP_OLD_VOL
        }]
-        # FIXME(mriedem): This is bug 1694844 where the user creates the server
-        # without specifying an AZ and there is no default_schedule_zone set
-        # and the cross_az_attach check fails because the volume's availability
-        # zone shows up as "us-central-1" and None != "us-central-1" so the API
-        # thinks the cross_az_attach=False setting was violated.
-        ex = self.assertRaises(api_client.OpenStackApiException,
-                               self.api.post_server, {'server': server})
-        self.assertEqual(400, ex.response.status_code)
-        self.assertIn('are not in the same availability_zone',
-                      six.text_type(ex))
+        server = self.api.post_server({'server': server})
+        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self.assertEqual(self.az, server['OS-EXT-AZ:availability_zone'])

    def test_cross_az_attach_false_boot_from_volume_default_zone_match(self):
        """Tests the scenario where [cinder]/cross_az_attach=False and the
@@ -112,16 +105,6 @@ class CrossAZAttachTestCase(test.TestCase,
        """
        self.flags(cross_az_attach=False, group='cinder')
        self.flags(default_schedule_zone=self.az)
-        # For this test we have to put the compute host in an aggregate with
-        # the AZ we want to match.
-        agg_id = self.api.post_aggregate({
-            'aggregate': {
-                'name': self.az,
-                'availability_zone': self.az
-            }
-        })['id']
-        self.api.add_host_to_aggregate(agg_id, 'host1')
-
        server = self._build_minimal_create_server_request(
            self.api,
            'test_cross_az_attach_false_boot_from_volume_default_zone_match')
@@ -135,3 +118,39 @@ class CrossAZAttachTestCase(test.TestCase,
        server = self.api.post_server({'server': server})
        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
        self.assertEqual(self.az, server['OS-EXT-AZ:availability_zone'])
+
+    def test_cross_az_attach_false_bfv_az_specified_mismatch(self):
+        """Negative test where the server is being created in a specific AZ
+        that does not match the volume being attached which results in a 400
+        error response.
+        """
+        self.flags(cross_az_attach=False, group='cinder')
+        server = self._build_minimal_create_server_request(
+            self.api, 'test_cross_az_attach_false_bfv_az_specified_mismatch',
+            az='london')
+        del server['imageRef']  # Do not need imageRef for boot from volume.
+        server['block_device_mapping_v2'] = [{
+            'source_type': 'volume',
+            'destination_type': 'volume',
+            'boot_index': 0,
+            'uuid': nova_fixtures.CinderFixture.IMAGE_BACKED_VOL
+        }]
+        # Use the AZ fixture to fake out the london AZ.
+        with nova_fixtures.AvailabilityZoneFixture(zones=['london', self.az]):
+            ex = self.assertRaises(api_client.OpenStackApiException,
+                                   self.api.post_server, {'server': server})
+        self.assertEqual(400, ex.response.status_code)
+        self.assertIn('Server and volumes are not in the same availability '
+                      'zone. Server is in: london. Volumes are in: %s' %
+                      self.az, six.text_type(ex))
+
+    def test_cross_az_attach_false_no_volumes(self):
+        """A simple test to make sure cross_az_attach=False API validation is
+        a noop if there are no volumes in the server create request.
+        """
+        self.flags(cross_az_attach=False, group='cinder')
+        server = self._build_minimal_create_server_request(
+            self.api, 'test_cross_az_attach_false_no_volumes', az=self.az)
+        server = self.api.post_server({'server': server})
+        server = self._wait_for_state_change(self.api, server, 'ACTIVE')
+        self.assertEqual(self.az, server['OS-EXT-AZ:availability_zone'])
@@ -4702,10 +4702,11 @@ class ServersControllerCreateTest(test.TestCase):
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self._test_create, {}, no_image=True)

+    @mock.patch('nova.compute.api.API._get_volumes_for_bdms')
    @mock.patch.object(compute_api.API, '_validate_bdm')
    @mock.patch.object(compute_api.API, '_get_bdm_image_metadata')
    def test_create_instance_with_bdms_and_no_image(
-            self, mock_bdm_image_metadata, mock_validate_bdm):
+            self, mock_bdm_image_metadata, mock_validate_bdm, mock_get_vols):
        mock_bdm_image_metadata.return_value = {}
        mock_validate_bdm.return_value = True
        old_create = compute_api.API.create
@@ -4726,10 +4727,11 @@ class ServersControllerCreateTest(test.TestCase):
        mock_bdm_image_metadata.assert_called_once_with(
            mock.ANY, mock.ANY, False)

+    @mock.patch('nova.compute.api.API._get_volumes_for_bdms')
    @mock.patch.object(compute_api.API, '_validate_bdm')
    @mock.patch.object(compute_api.API, '_get_bdm_image_metadata')
    def test_create_instance_with_bdms_and_empty_imageRef(
-            self, mock_bdm_image_metadata, mock_validate_bdm):
+            self, mock_bdm_image_metadata, mock_validate_bdm, mock_get_volumes):
        mock_bdm_image_metadata.return_value = {}
        mock_validate_bdm.return_value = True
        old_create = compute_api.API.create
@@ -4930,8 +4932,9 @@ class ServersControllerCreateTest(test.TestCase):
    def test_create_instance_with_invalid_destination_type(self):
        self._test_create_instance_with_destination_type_error('fake')

+    @mock.patch('nova.compute.api.API._get_volumes_for_bdms')
    @mock.patch.object(compute_api.API, '_validate_bdm')
-    def test_create_instance_bdm(self, mock_validate_bdm):
+    def test_create_instance_bdm(self, mock_validate_bdm, mock_get_volumes):
        bdm = [{
            'source_type': 'volume',
            'device_name': 'fake_dev',
@@ -4959,8 +4962,10 @@ class ServersControllerCreateTest(test.TestCase):
        self._test_create(params, no_image=True)
        mock_validate_bdm.assert_called_once()

+    @mock.patch('nova.compute.api.API._get_volumes_for_bdms')
    @mock.patch.object(compute_api.API, '_validate_bdm')
-    def test_create_instance_bdm_missing_device_name(self, mock_validate_bdm):
+    def test_create_instance_bdm_missing_device_name(self, mock_validate_bdm,
+                                                     mock_get_volumes):
        del self.bdm_v2[0]['device_name']

        old_create = compute_api.API.create
@@ -4992,7 +4997,8 @@ class ServersControllerCreateTest(test.TestCase):
        self.assertRaises(webob.exc.HTTPBadRequest, self._test_create, params,
                          no_image=True)

-    def test_create_instance_bdm_api_validation_fails(self):
+    @mock.patch('nova.compute.api.API._get_volumes_for_bdms')
+    def test_create_instance_bdm_api_validation_fails(self, mock_get_volumes):
        self.validation_fail_test_validate_called = False
        self.validation_fail_instance_destroy_called = False
@@ -5025,8 +5031,10 @@ class ServersControllerCreateTest(test.TestCase):
        self.validation_fail_test_validate_called = False
        self.validation_fail_instance_destroy_called = False

+    @mock.patch('nova.compute.api.API._get_volumes_for_bdms')
    @mock.patch.object(compute_api.API, '_validate_bdm')
-    def _test_create_bdm(self, params, mock_validate_bdm, no_image=False):
+    def _test_create_bdm(self, params, mock_validate_bdm, mock_get_volumes,
+                         no_image=False):
        self.body['server'].update(params)
        if no_image:
            del self.body['server']['imageRef']
@@ -5039,6 +5047,7 @@ class ServersControllerCreateTest(test.TestCase):
            test.MatchType(objects.Flavor),
            test.MatchType(objects.BlockDeviceMappingList),
            {},
+            mock_get_volumes.return_value,
            False)

    def test_create_instance_with_volumes_enabled(self):
@@ -1042,17 +1042,12 @@ class ComputeVolumeTestCase(BaseTestCase):
        def fake_get(self, context, res_id):
            return {'id': res_id, 'size': 4, 'multiattach': False}

-        def fake_check_availability_zone(*args, **kwargs):
-            pass
-
        def fake_attachment_create(_self, ctxt, vol_id, *args, **kwargs):
            # Return a unique attachment id per volume.
            return {'id': getattr(uuids, vol_id)}

        self.stub_out('nova.volume.cinder.API.get', fake_get)
        self.stub_out('nova.volume.cinder.API.get_snapshot', fake_get)
-        self.stub_out('nova.volume.cinder.API.check_availability_zone',
-                      fake_check_availability_zone)
        self.stub_out('nova.volume.cinder.API.attachment_create',
                      fake_attachment_create)
@@ -1105,8 +1100,12 @@ class ComputeVolumeTestCase(BaseTestCase):
            self.context, mappings)

        # Make sure it passes at first
+        volumes = {
+            volume_id: fake_get(None, None, volume_id)
+        }
        self.compute_api._validate_bdm(self.context, instance,
-                                       instance_type, mappings, {})
+                                       instance_type, mappings, {},
+                                       volumes)
        self.assertEqual(4, mappings[1].volume_size)
        self.assertEqual(6, mappings[2].volume_size)
@@ -1115,7 +1114,7 @@ class ComputeVolumeTestCase(BaseTestCase):
        self.assertRaises(exception.InvalidBDMBootSequence,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
-                          mappings, {})
+                          mappings, {}, volumes)
        mappings[2].boot_index = 0

        # number of local block_devices
@@ -1123,7 +1122,7 @@ class ComputeVolumeTestCase(BaseTestCase):
        self.assertRaises(exception.InvalidBDMLocalsLimit,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
-                          mappings, {})
+                          mappings, {}, volumes)
        ephemerals = [
            fake_block_device.FakeDbBlockDeviceDict({
                'device_name': '/dev/vdb',
@@ -1152,7 +1151,8 @@ class ComputeVolumeTestCase(BaseTestCase):
        mappings_ = mappings[:]
        mappings_.objects.extend(ephemerals)
        self.compute_api._validate_bdm(self.context, instance,
-                                       instance_type, mappings_, {})
+                                       instance_type, mappings_, {},
+                                       volumes)

        # Ephemerals over the size limit
        ephemerals[0].volume_size = 3
@@ -1161,14 +1161,14 @@ class ComputeVolumeTestCase(BaseTestCase):
        self.assertRaises(exception.InvalidBDMEphemeralSize,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
-                          mappings_, {})
+                          mappings_, {}, volumes)

        # Swap over the size limit
        mappings[0].volume_size = 3
        self.assertRaises(exception.InvalidBDMSwapSize,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
-                          mappings, {})
+                          mappings, {}, volumes)
        mappings[0].volume_size = 1

        additional_swap = [
@@ -1191,7 +1191,7 @@ class ComputeVolumeTestCase(BaseTestCase):
        self.assertRaises(exception.InvalidBDMFormat,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
-                          mappings_, {})
+                          mappings_, {}, volumes)

        image_no_size = [
            fake_block_device.FakeDbBlockDeviceDict({
@@ -1210,7 +1210,7 @@ class ComputeVolumeTestCase(BaseTestCase):
        self.assertRaises(exception.InvalidBDM,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
-                          mappings_, {})
+                          mappings_, {}, volumes)

        # blank device without a specified size fails
        blank_no_size = [
@@ -1229,7 +1229,7 @@ class ComputeVolumeTestCase(BaseTestCase):
        self.assertRaises(exception.InvalidBDM,
                          self.compute_api._validate_bdm,
                          self.context, instance, instance_type,
-                          mappings_, {})
+                          mappings_, {}, volumes)

    def test_validate_bdm_with_more_than_one_default(self):
        instance_type = {'swap': 1, 'ephemeral_gb': 1}
@@ -1259,19 +1259,15 @@ class ComputeVolumeTestCase(BaseTestCase):
                             'boot_index': -1}, anon=True)]
        all_mappings = block_device_obj.block_device_make_list_from_dicts(
            self.context, all_mappings)

+        image_cache = volumes = {}
        self.assertRaises(exception.InvalidBDMEphemeralSize,
                          self.compute_api._validate_bdm,
                          self.context, self.instance,
-                          instance_type, all_mappings, {})
+                          instance_type, all_mappings, image_cache, volumes)

-    @mock.patch.object(cinder.API, 'get')
-    @mock.patch.object(cinder.API, 'check_availability_zone')
    @mock.patch.object(cinder.API, 'attachment_create',
                       side_effect=exception.InvalidVolume(reason='error'))
-    def test_validate_bdm_media_service_invalid_volume(self, mock_att_create,
-                                                       mock_check_av_zone,
-                                                       mock_get):
+    def test_validate_bdm_media_service_invalid_volume(self, mock_att_create):
        volume_id = uuids.volume_id
        instance_type = {'swap': 1, 'ephemeral_gb': 1}
        bdms = [fake_block_device.FakeDbBlockDeviceDict({
@@ -1299,54 +1295,27 @@ class ComputeVolumeTestCase(BaseTestCase):

        for status, attach_status in status_values:
            if attach_status == 'attached':
-                mock_get.return_value = {'id': volume_id,
-                                         'status': status,
-                                         'attach_status': attach_status,
-                                         'multiattach': False,
-                                         'attachments': {}}
+                volumes = {volume_id: {'id': volume_id,
+                                       'status': status,
+                                       'attach_status': attach_status,
+                                       'multiattach': False,
+                                       'attachments': {}}}
            else:
-                mock_get.return_value = {'id': volume_id,
-                                         'status': status,
-                                         'attach_status': attach_status,
-                                         'multiattach': False}
+                volumes = {volume_id: {'id': volume_id,
+                                       'status': status,
+                                       'attach_status': attach_status,
+                                       'multiattach': False}}

            self.assertRaises(exception.InvalidVolume,
                              self.compute_api._validate_bdm,
                              self.context, self.instance_object,
-                              instance_type, bdms, {})
-            mock_get.assert_called_with(self.context, volume_id)
+                              instance_type, bdms, {}, volumes)

-    @mock.patch.object(cinder.API, 'get')
-    def test_validate_bdm_media_service_volume_not_found(self, mock_get):
-        volume_id = uuids.volume_id
-        instance_type = {'swap': 1, 'ephemeral_gb': 1}
-        bdms = [fake_block_device.FakeDbBlockDeviceDict({
-                        'id': 1,
-                        'no_device': None,
-                        'source_type': 'volume',
-                        'destination_type': 'volume',
-                        'snapshot_id': None,
-                        'volume_id': volume_id,
-                        'device_name': 'vda',
-                        'boot_index': 0,
-                        'delete_on_termination': False}, anon=True)]
-        bdms = block_device_obj.block_device_make_list_from_dicts(self.context,
-                                                                  bdms)
-
-        mock_get.side_effect = exception.VolumeNotFound(volume_id)
-        self.assertRaises(exception.InvalidBDMVolume,
-                          self.compute_api._validate_bdm,
-                          self.context, self.instance,
-                          instance_type, bdms, {})
-
-    @mock.patch.object(cinder.API, 'get')
    @mock.patch.object(cinder.API, 'check_availability_zone')
    @mock.patch.object(cinder.API, 'attachment_create',
                       return_value={'id': uuids.attachment_id})
    def test_validate_bdm_media_service_valid(self, mock_att_create,
-                                              mock_check_av_zone,
-                                              mock_get):
+                                              mock_check_av_zone):
        volume_id = uuids.volume_id
        instance_type = {'swap': 1, 'ephemeral_gb': 1}
        bdms = [fake_block_device.FakeDbBlockDeviceDict({
@@ -1366,12 +1335,12 @@ class ComputeVolumeTestCase(BaseTestCase):
                  'attach_status': 'detached',
                  'multiattach': False}

-        mock_get.return_value = volume
+        image_cache = {}
+        volumes = {volume_id: volume}
        self.compute_api._validate_bdm(self.context, self.instance_object,
-                                       instance_type, bdms, {})
-        mock_get.assert_called_once_with(self.context, volume_id)
-        mock_check_av_zone.assert_called_once_with(
-            self.context, volume, instance=self.instance_object)
+                                       instance_type, bdms, image_cache,
+                                       volumes)
+        mock_check_av_zone.assert_not_called()
        mock_att_create.assert_called_once_with(
            self.context, volume_id, self.instance_object.uuid)
@@ -4205,23 +4205,62 @@ class _ComputeAPIUnitTestMixIn(object):
                          self.context,
                          bdms, legacy_bdm=True)

+    def test_get_volumes_for_bdms_errors(self):
+        """Simple test to make sure _get_volumes_for_bdms raises up errors."""
+        # Use a mix of pre-existing and source_type=image volumes to test the
+        # filtering logic in the method.
+        bdms = objects.BlockDeviceMappingList(objects=[
+            objects.BlockDeviceMapping(source_type='image', volume_id=None),
+            objects.BlockDeviceMapping(source_type='volume',
+                                       volume_id=uuids.volume_id)])
+        for exc in (
+            exception.VolumeNotFound(volume_id=uuids.volume_id),
+            exception.CinderConnectionFailed(reason='gremlins'),
+            exception.Forbidden()
+        ):
+            with mock.patch.object(self.compute_api.volume_api, 'get',
+                                   side_effect=exc) as mock_vol_get:
+                self.assertRaises(type(exc),
+                                  self.compute_api._get_volumes_for_bdms,
+                                  self.context, bdms)
+            mock_vol_get.assert_called_once_with(self.context, uuids.volume_id)
+
+    @ddt.data(True, False)
+    def test_validate_vol_az_for_create_multiple_vols_diff_az(self, cross_az):
+        """Tests cross_az_attach=True|False scenarios where the volumes are
+        in different zones.
+        """
+        self.flags(cross_az_attach=cross_az, group='cinder')
+        volumes = [{'availability_zone': str(x)} for x in range(2)]
+        if cross_az:
+            # Since cross_az_attach=True (the default) we do not care that the
+            # volumes are in different zones.
+            self.assertIsNone(self.compute_api._validate_vol_az_for_create(
+                None, volumes))
+        else:
+            # In this case the volumes cannot be in different zones.
+            ex = self.assertRaises(
+                exception.MismatchVolumeAZException,
+                self.compute_api._validate_vol_az_for_create, None, volumes)
+            self.assertIn('Volumes are in different availability zones: 0,1',
+                          six.text_type(ex))
+
+    def test_validate_vol_az_for_create_vol_az_matches_default_cpu_az(self):
+        """Tests the scenario that the instance is not being created in a
+        specific zone and the volume's zone matches
+        CONF.default_availability_zone so None is returned indicating the
+        RequestSpec.availability_zone does not need to be updated.
+        """
+        self.flags(cross_az_attach=False, group='cinder')
+        volumes = [{'availability_zone': CONF.default_availability_zone}]
+        self.assertIsNone(self.compute_api._validate_vol_az_for_create(
+            None, volumes))
+
    @mock.patch.object(cinder.API, 'get_snapshot',
             side_effect=exception.CinderConnectionFailed(reason='error'))
-    @mock.patch.object(cinder.API, 'get',
-             side_effect=exception.CinderConnectionFailed(reason='error'))
-    def test_validate_bdm_with_cinder_down(self, mock_get, mock_get_snapshot):
+    def test_validate_bdm_with_cinder_down(self, mock_get_snapshot):
        instance = self._create_instance_obj()
        instance_type = self._create_flavor()
-        bdm = [objects.BlockDeviceMapping(
-                **fake_block_device.FakeDbBlockDeviceDict(
-                {
-                 'id': 1,
-                 'volume_id': 1,
-                 'source_type': 'volume',
-                 'destination_type': 'volume',
-                 'device_name': 'vda',
-                 'boot_index': 0,
-                 }))]
        bdms = [objects.BlockDeviceMapping(
                **fake_block_device.FakeDbBlockDeviceDict(
                {
@@ -4232,20 +4271,15 @@ class _ComputeAPIUnitTestMixIn(object):
                 'device_name': 'vda',
                 'boot_index': 0,
                 }))]
+        image_cache = volumes = {}
-        self.assertRaises(exception.CinderConnectionFailed,
-                          self.compute_api._validate_bdm,
-                          self.context,
-                          instance, instance_type, bdm, {})
        self.assertRaises(exception.CinderConnectionFailed,
                          self.compute_api._validate_bdm,
                          self.context,
-                          instance, instance_type, bdms, {})
+                          instance, instance_type, bdms, image_cache, volumes)

-    @mock.patch.object(cinder.API, 'get')
    @mock.patch.object(cinder.API, 'attachment_create',
                       side_effect=exception.InvalidInput(reason='error'))
-    def test_validate_bdm_with_error_volume_new_flow(self, mock_attach_create,
-                                                     mock_get):
+    def test_validate_bdm_with_error_volume_new_flow(self, mock_attach_create):
        # Tests that an InvalidInput exception raised from
        # volume_api.attachment_create due to the volume status not being
        # 'available' results in _validate_bdm re-raising InvalidVolume.
@@ -4256,7 +4290,6 @@ class _ComputeAPIUnitTestMixIn(object):
        volume_info = {'status': 'error',
                       'attach_status': 'detached',
                       'id': volume_id, 'multiattach': False}
-        mock_get.return_value = volume_info
        bdms = [objects.BlockDeviceMapping(
                **fake_block_device.FakeDbBlockDeviceDict(
                {
@@ -4270,9 +4303,9 @@ class _ComputeAPIUnitTestMixIn(object):
        self.assertRaises(exception.InvalidVolume,
                          self.compute_api._validate_bdm,
                          self.context,
-                          instance, instance_type, bdms, {})
+                          instance, instance_type, bdms, {},
+                          {volume_id: volume_info})

-        mock_get.assert_called_once_with(self.context, volume_id)
        mock_attach_create.assert_called_once_with(
            self.context, volume_id, instance.uuid)
@@ -4283,10 +4316,11 @@ class _ComputeAPIUnitTestMixIn(object):
            objects.BlockDeviceMapping(
                boot_index=None, image_id=uuids.image_id,
                source_type='image', destination_type='volume')])
+        image_cache = volumes = {}
        self.assertRaises(exception.InvalidBDMBootSequence,
                          self.compute_api._validate_bdm,
                          self.context, objects.Instance(), objects.Flavor(),
-                          bdms, {})
+                          bdms, image_cache, volumes)

    def test_validate_bdm_with_volume_type_name_is_specified(self):
        """Test _check_requested_volume_type method is used.
@@ -4329,8 +4363,10 @@ class _ComputeAPIUnitTestMixIn(object):
                '_check_requested_volume_type')) as (
            get_all_vol_types, vol_type_requested):

+            image_cache = volumes = {}
            self.compute_api._validate_bdm(self.context, instance,
-                                           instance_type, bdms, {})
+                                           instance_type, bdms, image_cache,
+                                           volumes)

            get_all_vol_types.assert_called_once_with(self.context)
@@ -4351,10 +4387,11 @@ class _ComputeAPIUnitTestMixIn(object):
                source_type='image', destination_type='volume',
                volume_type=None, snapshot_id=None, volume_id=None,
                volume_size=None)])
+        image_cache = volumes = {}
        self.assertRaises(exception.InvalidBDM,
                          self.compute_api._validate_bdm,
                          self.context, instance, objects.Flavor(),
-                          bdms, {})
+                          bdms, image_cache, volumes)
        self.assertEqual(0, mock_get_image.call_count)
        # then we test the case of instance.image_ref != bdm.image_id
        image_id = uuids.image_id
@@ -4367,7 +4404,7 @@ class _ComputeAPIUnitTestMixIn(object):
        self.assertRaises(exception.InvalidBDM,
                          self.compute_api._validate_bdm,
                          self.context, instance, objects.Flavor(),
-                          bdms, {})
+                          bdms, image_cache, volumes)
        mock_get_image.assert_called_once_with(self.context, image_id)

    def test_the_specified_volume_type_id_assignment_to_name(self):
@@ -4487,6 +4524,7 @@ class _ComputeAPIUnitTestMixIn(object):
                             mock_br, mock_rs):
        fake_keypair = objects.KeyPair(name='test')

+        @mock.patch.object(self.compute_api, '_get_volumes_for_bdms')
        @mock.patch.object(self.compute_api,
                           '_create_reqspec_buildreq_instmapping',
                           new=mock.MagicMock())
@@ -4496,7 +4534,7 @@ class _ComputeAPIUnitTestMixIn(object):
                           'create_db_entry_for_new_instance')
        @mock.patch.object(self.compute_api,
                           '_bdm_validate_set_size_and_instance')
-        def do_test(mock_bdm_v, mock_cdb, mock_sg, mock_cniq):
+        def do_test(mock_bdm_v, mock_cdb, mock_sg, mock_cniq, mock_get_vols):
            mock_cniq.return_value = 1
            self.compute_api._provision_instances(self.context,
                                                  mock.sentinel.flavor,
@@ -4523,6 +4561,7 @@ class _ComputeAPIUnitTestMixIn(object):
        do_test()

    def test_provision_instances_creates_build_request(self):
+        @mock.patch.object(self.compute_api, '_get_volumes_for_bdms')
        @mock.patch.object(self.compute_api,
                           '_create_reqspec_buildreq_instmapping')
        @mock.patch.object(objects.Instance, 'create')
@@ -4532,7 +4571,7 @@ class _ComputeAPIUnitTestMixIn(object):
        @mock.patch.object(objects.RequestSpec, 'from_components')
        def do_test(mock_req_spec_from_components, _mock_ensure_default,
                    mock_check_num_inst_quota, mock_inst_create,
-                    mock_create_rs_br_im):
+                    mock_create_rs_br_im, mock_get_volumes):

            min_count = 1
            max_count = 2
@@ -4595,7 +4634,8 @@ class _ComputeAPIUnitTestMixIn(object):
                instance_tags, trusted_certs, False)
            validate_bdm.assert_has_calls([mock.call(
                ctxt, test.MatchType(objects.Instance), flavor,
-                block_device_mappings, {}, False)] * max_count)
+                block_device_mappings, {}, mock_get_volumes.return_value,
+                False)] * max_count)

            for rs, br, im in instances_to_build:
                self.assertIsInstance(br.instance, objects.Instance)
@@ -4609,6 +4649,7 @@ class _ComputeAPIUnitTestMixIn(object):
        do_test()

    def test_provision_instances_creates_instance_mapping(self):
+        @mock.patch.object(self.compute_api, '_get_volumes_for_bdms')
        @mock.patch.object(self.compute_api,
                           '_create_reqspec_buildreq_instmapping',
                           new=mock.MagicMock())
@@ -4621,7 +4662,8 @@ class _ComputeAPIUnitTestMixIn(object):
        @mock.patch.object(objects.RequestSpec, 'from_components',
                           mock.MagicMock())
        @mock.patch('nova.objects.InstanceMapping')
-        def do_test(mock_inst_mapping, mock_check_num_inst_quota):
+        def do_test(mock_inst_mapping, mock_check_num_inst_quota,
+                    mock_get_vols):
            inst_mapping_mock = mock.MagicMock()

            mock_check_num_inst_quota.return_value = 1
@@ -5020,9 +5062,10 @@ class _ComputeAPIUnitTestMixIn(object):
                block_device_mapping)))

        with mock.patch.object(self.compute_api, '_validate_bdm'):
+            image_cache = volumes = {}
            bdms = self.compute_api._bdm_validate_set_size_and_instance(
                self.context, instance, instance_type, block_device_mapping,
-                {})
+                image_cache, volumes)

        expected = [{'device_name': '/dev/sda1',
                     'source_type': 'snapshot', 'destination_type': 'volume',
@@ -0,0 +1,18 @@
+---
+fixes:
+  - |
+    Long standing `bug 1694844`_ is now fixed where the following conditions
+    would lead to a 400 error response during server create:
+
+    * ``[cinder]/cross_az_attach=False``
+    * ``[DEFAULT]/default_schedule_zone=None``
+    * A server is created without an availability zone specified but with
+      pre-existing volume block device mappings
+
+    Before the bug was fixed, users would have to specify an availability zone
+    that matches the zone that the volume(s) were in. With the fix, the compute
+    API will implicitly create the server in the zone that the volume(s) are
+    in as long as the volume zone is not the same as the
+    ``[DEFAULT]/default_availability_zone`` value (defaults to ``nova``).
+
+    .. _bug 1694844: https://launchpad.net/bugs/1694844
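To make the release note concrete, a create request shaped like the one below (taken from the functional tests in this change; the volume id and flavor are placeholders) now succeeds without an ``availability_zone`` and lands the server in the volume's zone:

```python
# Illustrative POST /servers request body; volume id/flavor are placeholders.
server_create_body = {
    'server': {
        'name': 'bfv-server',
        'flavorRef': '1',  # placeholder flavor
        # No 'availability_zone' key and no [DEFAULT]/default_schedule_zone.
        'block_device_mapping_v2': [{
            'source_type': 'volume',
            'destination_type': 'volume',
            'boot_index': 0,
            'uuid': 'PRE-EXISTING-VOLUME-ID',  # placeholder
        }],
    },
}
# With [cinder]/cross_az_attach=False the API now schedules this server to
# the volume's zone, unless that zone equals default_availability_zone.
```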