Merge "Handle Cinder 3.27 style attachments in swap_volume"
commit 5747308885
@@ -13,7 +13,7 @@
     "disabled_reason": null,
     "report_count": 1,
     "forced_down": false,
-    "version": 20,
+    "version": 21,
     "availability_zone": null,
     "uuid": "fa69c544-906b-4a6a-a9c6-c1f7a8078c73"
 }
@@ -411,7 +411,8 @@ class VolumeAttachmentController(wsgi.Controller):
                                                        new_volume)
                 found = True
                 break
-            except exception.VolumeUnattached:
+            except (exception.VolumeUnattached,
+                    exception.VolumeBDMNotFound):
                 # The volume is not attached. Treat it as NotFound
                 # by falling through.
                 pass
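The widened except clause above means a swap request against a volume whose block device mapping is gone now falls through to a 404 instead of escaping as a 500. A minimal, self-contained sketch of that fall-through pattern; the exception classes and the swap helper are illustrative stand-ins, not the Nova controller:

    class VolumeUnattached(Exception):
        pass

    class VolumeBDMNotFound(Exception):
        pass

    class HTTPNotFound(Exception):
        pass

    def swap(find_and_swap, volume_id):
        found = False
        try:
            find_and_swap(volume_id)
            found = True
        except (VolumeUnattached, VolumeBDMNotFound):
            # The volume is not attached; fall through and report 404.
            pass
        if not found:
            raise HTTPNotFound('volume %s is not attached' % volume_id)

    def missing_bdm(volume_id):
        # Simulates the compute API raising VolumeBDMNotFound.
        raise VolumeBDMNotFound(volume_id)

    try:
        swap(missing_bdm, 'vol-1')
    except HTTPNotFound as e:
        print(e)  # volume vol-1 is not attached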
@@ -3901,16 +3901,36 @@ class API(base.Base):
         self.volume_api.check_availability_zone(context, new_volume,
                                                 instance=instance)
         self.volume_api.begin_detaching(context, old_volume['id'])
-        self.volume_api.reserve_volume(context, new_volume['id'])
+
+        # Get the BDM for the attached (old) volume so we can tell if it was
+        # attached with the new-style Cinder 3.27 API.
+        bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
+            context, old_volume['id'], instance.uuid)
+        new_attachment_id = None
+        if bdm.attachment_id is None:
+            # This is an old-style attachment so reserve the new volume before
+            # we cast to the compute host.
+            self.volume_api.reserve_volume(context, new_volume['id'])
+        else:
+            # This is a new-style attachment so for the volume that we are
+            # going to swap to, create a new volume attachment.
+            new_attachment_id = self.volume_api.attachment_create(
+                context, new_volume['id'], instance.uuid)['id']
+
         try:
             self.compute_rpcapi.swap_volume(
                 context, instance=instance,
                 old_volume_id=old_volume['id'],
-                new_volume_id=new_volume['id'])
+                new_volume_id=new_volume['id'],
+                new_attachment_id=new_attachment_id)
         except Exception:
             with excutils.save_and_reraise_exception():
                 self.volume_api.roll_detaching(context, old_volume['id'])
-                self.volume_api.unreserve_volume(context, new_volume['id'])
+                if new_attachment_id is None:
+                    self.volume_api.unreserve_volume(context,
+                                                     new_volume['id'])
+                else:
+                    self.volume_api.attachment_delete(
+                        context, new_attachment_id)
 
     @check_instance_lock
     @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
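The API-side hunk above is the crux of the change: whether the new volume is reserved (old-style flow) or given a Cinder 3.27 attachment (new-style flow) is decided purely by whether the existing BDM carries an attachment_id, and the rollback mirrors whichever path was taken. A self-contained sketch of that decision; FakeVolumeAPI, the BDM namedtuple, and the ids are illustrative stand-ins, not Nova code:

    from collections import namedtuple

    BDM = namedtuple('BDM', ['attachment_id'])

    class FakeVolumeAPI(object):
        """Stand-in for the Cinder client; records calls for the demo."""
        def __init__(self):
            self.calls = []
        def reserve_volume(self, ctxt, volume_id):
            self.calls.append(('reserve_volume', volume_id))
        def attachment_create(self, ctxt, volume_id, instance_uuid):
            self.calls.append(('attachment_create', volume_id))
            return {'id': 'new-attachment-uuid'}
        def unreserve_volume(self, ctxt, volume_id):
            self.calls.append(('unreserve_volume', volume_id))
        def attachment_delete(self, ctxt, attachment_id):
            self.calls.append(('attachment_delete', attachment_id))

    def prepare_swap(volume_api, ctxt, bdm, new_volume_id, instance_uuid):
        # Old-style attachment: no attachment_id on the BDM, so reserve the
        # new volume. New-style: create a 3.27 attachment for it instead.
        if bdm.attachment_id is None:
            volume_api.reserve_volume(ctxt, new_volume_id)
            return None
        return volume_api.attachment_create(
            ctxt, new_volume_id, instance_uuid)['id']

    def rollback_swap(volume_api, ctxt, new_volume_id, new_attachment_id):
        # Mirror of the except block: undo whichever preparation was done.
        if new_attachment_id is None:
            volume_api.unreserve_volume(ctxt, new_volume_id)
        else:
            volume_api.attachment_delete(ctxt, new_attachment_id)

    api = FakeVolumeAPI()
    att = prepare_swap(api, None, BDM(attachment_id='old-attach'),
                       'new-vol', 'instance-uuid')
    rollback_swap(api, None, 'new-vol', att)
    print(api.calls)  # attachment_create then attachment_delete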
@@ -481,7 +481,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
 class ComputeManager(manager.Manager):
     """Manages the running instances from creation to destruction."""
 
-    target = messaging.Target(version='4.16')
+    target = messaging.Target(version='4.17')
 
     # How long to wait in seconds before re-issuing a shutdown
     # signal to an instance during power off. The overall
@@ -5008,11 +5008,24 @@ class ComputeManager(manager.Manager):
                 attachment_id=attachment_id)
 
     def _init_volume_connection(self, context, new_volume_id,
-                                old_volume_id, connector, instance, bdm):
-        new_cinfo = self.volume_api.initialize_connection(context,
-                                                          new_volume_id,
-                                                          connector)
+                                old_volume_id, connector, bdm,
+                                new_attachment_id):
+        if new_attachment_id is None:
+            # We're dealing with an old-style attachment so initialize the
+            # connection so we can get the connection_info.
+            new_cinfo = self.volume_api.initialize_connection(context,
+                                                              new_volume_id,
+                                                              connector)
+        else:
+            # This is a new style attachment and the API created the new
+            # volume attachment and passed the id to the compute over RPC.
+            # At this point we need to update the new volume attachment with
+            # the host connector, which will give us back the new attachment
+            # connection_info.
+            new_cinfo = self.volume_api.attachment_update(
+                context, new_attachment_id, connector).connection_info
         old_cinfo = jsonutils.loads(bdm['connection_info'])
         if old_cinfo and 'serial' not in old_cinfo:
             old_cinfo['serial'] = old_volume_id
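Both branches of _init_volume_connection end with the same thing in hand, a connection_info dict for the new volume; only the Cinder call that produces it differs. A stub-based sketch of that contract; the fake client and attachment object are illustrative, not the real cinderclient objects:

    class FakeAttachment(object):
        def __init__(self, connection_info):
            self.connection_info = connection_info

    class FakeVolumeAPI(object):
        def initialize_connection(self, ctxt, volume_id, connector):
            # Old-style flow: Cinder returns connection_info directly.
            return {'driver_volume_type': 'fake',
                    'data': {'volume_id': volume_id}}
        def attachment_update(self, ctxt, attachment_id, connector):
            # New-style flow: updating the attachment with the host
            # connector yields an object carrying the connection_info.
            return FakeAttachment({'driver_volume_type': 'fake',
                                   'data': {'attachment_id': attachment_id}})

    def get_new_cinfo(volume_api, ctxt, new_volume_id, connector,
                      new_attachment_id=None):
        if new_attachment_id is None:
            return volume_api.initialize_connection(
                ctxt, new_volume_id, connector)
        return volume_api.attachment_update(
            ctxt, new_attachment_id, connector).connection_info

    api = FakeVolumeAPI()
    print(get_new_cinfo(api, None, 'vol-1', {'host': 'compute1'}))
    print(get_new_cinfo(api, None, 'vol-1', {'host': 'compute1'}, 'att-1'))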
@@ -5024,17 +5037,15 @@ class ComputeManager(manager.Manager):
         return (old_cinfo, new_cinfo)
 
     def _swap_volume(self, context, instance, bdm, connector,
-                     old_volume_id, new_volume_id, resize_to):
+                     old_volume_id, new_volume_id, resize_to,
+                     new_attachment_id, is_cinder_migration):
         mountpoint = bdm['device_name']
         failed = False
         new_cinfo = None
         try:
-            old_cinfo, new_cinfo = self._init_volume_connection(context,
-                                                                new_volume_id,
-                                                                old_volume_id,
-                                                                connector,
-                                                                instance,
-                                                                bdm)
+            old_cinfo, new_cinfo = self._init_volume_connection(
+                context, new_volume_id, old_volume_id, connector,
+                bdm, new_attachment_id)
             # NOTE(lyarwood): The Libvirt driver, the only virt driver
             # currently implementing swap_volume, will modify the contents of
             # new_cinfo when connect_volume is called. This is then saved to
@@ -5069,39 +5080,85 @@ class ComputeManager(manager.Manager):
                 LOG.exception(msg, {'volume_id': new_volume_id,
                                     'mountpoint': bdm['device_name']},
                               instance=instance)
 
                 # The API marked the volume as 'detaching' for the old volume
                 # so we need to roll that back so the volume goes back to
                 # 'in-use' state.
                 self.volume_api.roll_detaching(context, old_volume_id)
-                self.volume_api.unreserve_volume(context, new_volume_id)
+
+                if new_attachment_id is None:
+                    # The API reserved the new volume so it would be in
+                    # 'attaching' status, so we need to unreserve it so it
+                    # goes back to 'available' status.
+                    self.volume_api.unreserve_volume(context, new_volume_id)
+                else:
+                    # This is a new style attachment for the new volume, which
+                    # was created in the API. We just need to delete it here
+                    # to put the new volume back into 'available' status.
+                    self.volume_api.attachment_delete(
+                        context, new_attachment_id)
         finally:
+            # TODO(mriedem): This finally block is terribly confusing and is
+            # trying to do too much. We should consider removing the finally
+            # block and move whatever needs to happen on success and failure
+            # into the blocks above for clarity, even if it means a bit of
+            # redundant code.
             conn_volume = new_volume_id if failed else old_volume_id
             if new_cinfo:
-                LOG.debug("swap_volume: calling Cinder terminate_connection "
-                          "for %(volume)s", {'volume': conn_volume},
+                LOG.debug("swap_volume: removing Cinder connection "
+                          "for volume %(volume)s", {'volume': conn_volume},
                           instance=instance)
-                self.volume_api.terminate_connection(context,
-                                                     conn_volume,
-                                                     connector)
+                if bdm.attachment_id is None:
+                    # This is the pre-3.27 flow for new-style volume
+                    # attachments so just terminate the connection.
+                    self.volume_api.terminate_connection(context,
+                                                         conn_volume,
+                                                         connector)
+                else:
+                    # This is a new style volume attachment. If we failed, then
+                    # the new attachment was already deleted above in the
+                    # exception block and we have nothing more to do here. If
+                    # swap_volume was successful in the driver, then we need to
+                    # "detach" the original attachment by deleting it.
+                    if not failed:
+                        self.volume_api.attachment_delete(
+                            context, bdm.attachment_id)
 
-            # NOTE(lyarwood): The following call to
-            # os-migrate-volume-completion returns a dict containing
-            # save_volume_id, this volume id has two possible values :
-            # 1. old_volume_id if we are migrating (retyping) volumes
-            # 2. new_volume_id if we are swapping between two existing volumes
-            # This volume id is later used to update the volume_id and
-            # connection_info['serial'] of the BDM.
-            comp_ret = self.volume_api.migrate_volume_completion(
-                context,
-                old_volume_id,
-                new_volume_id,
-                error=failed)
-            LOG.debug("swap_volume: Cinder migrate_volume_completion "
-                      "returned: %(comp_ret)s", {'comp_ret': comp_ret},
-                      instance=instance)
+            # Need to make some decisions based on whether this was
+            # a Cinder initiated migration or not. The callback to
+            # migration completion isn't needed in the case of a
+            # nova initiated simple swap of two volume
+            # "volume-update" call so skip that. The new attachment
+            # scenarios will give us a new attachment record and
+            # that's what we want.
+            if bdm.attachment_id and not is_cinder_migration:
+                # we don't callback to cinder
+                comp_ret = {'save_volume_id': new_volume_id}
+            else:
+                # NOTE(lyarwood): The following call to
+                # os-migrate-volume-completion returns a dict containing
+                # save_volume_id, this volume id has two possible values :
+                # 1. old_volume_id if we are migrating (retyping) volumes
+                # 2. new_volume_id if we are swapping between two existing
+                #    volumes
+                # This volume id is later used to update the volume_id and
+                # connection_info['serial'] of the BDM.
+                comp_ret = self.volume_api.migrate_volume_completion(
+                    context,
+                    old_volume_id,
+                    new_volume_id,
+                    error=failed)
+                LOG.debug("swap_volume: Cinder migrate_volume_completion "
+                          "returned: %(comp_ret)s", {'comp_ret': comp_ret},
+                          instance=instance)
 
         return (comp_ret, new_cinfo)
 
     @wrap_exception()
     @reverts_task_state
     @wrap_instance_fault
-    def swap_volume(self, context, old_volume_id, new_volume_id, instance):
+    def swap_volume(self, context, old_volume_id, new_volume_id, instance,
+                    new_attachment_id=None):
         """Swap volume for an instance."""
         context = context.elevated()
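The completion logic in the hunk above splits on who initiated the swap: a Nova-only swap of a new-style attachment skips the Cinder callback and simply keeps the new volume id, while every other case calls os-migrate-volume-completion and records whatever volume id Cinder hands back. A tiny sketch of that decision table; the stub client is illustrative only:

    def complete_swap(volume_api, ctxt, bdm_attachment_id,
                      is_cinder_migration, old_volume_id, new_volume_id,
                      failed=False):
        # Nova-initiated swap of a new-style attachment: no Cinder
        # callback, the BDM simply keeps the new volume id.
        if bdm_attachment_id and not is_cinder_migration:
            return {'save_volume_id': new_volume_id}
        # Otherwise tell Cinder the migration finished (or failed); Cinder
        # answers with the volume id the BDM should record.
        return volume_api.migrate_volume_completion(
            ctxt, old_volume_id, new_volume_id, error=failed)

    class FakeVolumeAPI(object):
        def migrate_volume_completion(self, ctxt, old_id, new_id,
                                      error=False):
            # For a retype, Cinder hands back the original volume id.
            return {'save_volume_id': old_id}

    api = FakeVolumeAPI()
    print(complete_swap(api, None, 'att-1', False, 'old', 'new'))  # 'new'
    print(complete_swap(api, None, 'att-1', True, 'old', 'new'))   # 'old'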
@@ -5116,7 +5173,16 @@ class ComputeManager(manager.Manager):
         connector = self.driver.get_volume_connector(instance)
 
         resize_to = 0
-        old_vol_size = self.volume_api.get(context, old_volume_id)['size']
+        old_volume = self.volume_api.get(context, old_volume_id)
+        # Yes this is a tightly-coupled state check of what's going on inside
+        # cinder, but we need this while we still support old (v1/v2) and
+        # new style attachments (v3.27). Once we drop support for old style
+        # attachments we could think about cleaning up the cinder-initiated
+        # swap volume API flows.
+        is_cinder_migration = (
+            True if old_volume['status'] in ('retyping',
+                                             'migrating') else False)
+        old_vol_size = old_volume['size']
         new_vol_size = self.volume_api.get(context, new_volume_id)['size']
         if new_vol_size > old_vol_size:
             resize_to = new_vol_size
@@ -5124,12 +5190,15 @@ class ComputeManager(manager.Manager):
         LOG.info('Swapping volume %(old_volume)s for %(new_volume)s',
                  {'old_volume': old_volume_id, 'new_volume': new_volume_id},
                  instance=instance)
-        comp_ret, new_cinfo = self._swap_volume(context, instance,
-                                                bdm,
-                                                connector,
-                                                old_volume_id,
-                                                new_volume_id,
-                                                resize_to)
+        comp_ret, new_cinfo = self._swap_volume(context,
+                                                instance,
+                                                bdm,
+                                                connector,
+                                                old_volume_id,
+                                                new_volume_id,
+                                                resize_to,
+                                                new_attachment_id,
+                                                is_cinder_migration)
 
         # NOTE(lyarwood): Update the BDM with the modified new_cinfo and
         # correct volume_id returned by Cinder.
@@ -5146,6 +5215,11 @@ class ComputeManager(manager.Manager):
         if resize_to:
             values['volume_size'] = resize_to
 
+        if new_attachment_id is not None:
+            # This was a volume swap for a new-style attachment so we
+            # need to update the BDM attachment_id for the new attachment.
+            values['attachment_id'] = new_attachment_id
+
         LOG.debug("swap_volume: Updating volume %(volume_id)s BDM record with "
                   "%(updates)s", {'volume_id': bdm.volume_id,
                                   'updates': values},
@@ -326,6 +326,7 @@ class ComputeAPI(object):
           was bumped to signal the availability of the corrected RPC API
         * 4.15 - Add tag argument to reserve_block_device_name()
         * 4.16 - Add tag argument to attach_interface()
+        * 4.17 - Add new_attachment_id to swap_volume.
     '''
 
     VERSION_ALIASES = {
@@ -924,13 +925,20 @@ class ComputeAPI(object):
             server=host, version=version)
         return cctxt.call(ctxt, 'set_host_enabled', enabled=enabled)
 
-    def swap_volume(self, ctxt, instance, old_volume_id, new_volume_id):
-        version = '4.0'
-        cctxt = self.router.client(ctxt).prepare(
-            server=_compute_host(None, instance), version=version)
-        cctxt.cast(ctxt, 'swap_volume',
-                   instance=instance, old_volume_id=old_volume_id,
-                   new_volume_id=new_volume_id)
+    def swap_volume(self, ctxt, instance, old_volume_id, new_volume_id,
+                    new_attachment_id=None):
+        version = '4.17'
+        client = self.router.client(ctxt)
+        kwargs = dict(instance=instance,
+                      old_volume_id=old_volume_id,
+                      new_volume_id=new_volume_id,
+                      new_attachment_id=new_attachment_id)
+        if not client.can_send_version(version):
+            version = '4.0'
+            kwargs.pop('new_attachment_id')
+        cctxt = client.prepare(
+            server=_compute_host(None, instance), version=version)
+        cctxt.cast(ctxt, 'swap_volume', **kwargs)
 
     def get_host_uptime(self, ctxt, host):
         version = '4.0'
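The client-side pattern above, try the newest RPC version and drop both the version and the new kwarg when the receiving compute cannot handle it, is what keeps rolling upgrades safe. A stripped-down sketch of that negotiation; FakeClient is a stand-in for the oslo.messaging client, and the string version comparison is simplified for the demo:

    class FakeClient(object):
        """Stand-in for an oslo.messaging RPC client."""
        def __init__(self, max_version):
            self.max_version = max_version
        def can_send_version(self, version):
            # Real clients compare against a negotiated version cap;
            # a plain string compare is enough for these two versions.
            return version <= self.max_version
        def prepare(self, server, version):
            print('prepare(server=%s, version=%s)' % (server, version))
            return self
        def cast(self, ctxt, method, **kwargs):
            print('cast %s(%s)' % (method, sorted(kwargs)))

    def swap_volume(client, ctxt, host, old_volume_id, new_volume_id,
                    new_attachment_id=None):
        version = '4.17'
        kwargs = dict(old_volume_id=old_volume_id,
                      new_volume_id=new_volume_id,
                      new_attachment_id=new_attachment_id)
        if not client.can_send_version(version):
            # Old compute: fall back to 4.0 and strip the kwarg it
            # would not understand.
            version = '4.0'
            kwargs.pop('new_attachment_id')
        client.prepare(server=host, version=version).cast(
            ctxt, 'swap_volume', **kwargs)

    swap_volume(FakeClient('4.17'), None, 'host1', 'old', 'new', 'att-1')
    swap_volume(FakeClient('4.0'), None, 'host1', 'old', 'new', 'att-1')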
@@ -32,7 +32,7 @@ LOG = logging.getLogger(__name__)
 
 
 # NOTE(danms): This is the global service version counter
-SERVICE_VERSION = 20
+SERVICE_VERSION = 21
 
 
 # NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
@@ -108,6 +108,8 @@ SERVICE_VERSION_HISTORY = (
     {'compute_rpc': '4.15'},
     # Version 20: Compute RPC version 4.16
     {'compute_rpc': '4.16'},
+    # Version 21: Compute RPC version 4.17
+    {'compute_rpc': '4.17'},
 )
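SERVICE_VERSION_HISTORY is an append-only tuple indexed by service version, which is what lets the RPC layer translate "the oldest service in this deployment is version 20" into "cap compute RPC at 4.16". A self-contained illustration of that lookup; the abbreviated table and the OFFSET constant are hypothetical stand-ins for the real, full table:

    # Abbreviated stand-in; in the real table the index is the
    # service version itself.
    SERVICE_VERSION_HISTORY = (
        {'compute_rpc': '4.15'},  # version 19 in this abbreviated table
        {'compute_rpc': '4.16'},  # version 20
        {'compute_rpc': '4.17'},  # version 21
    )
    OFFSET = 19  # first service version represented above

    def rpc_cap_for_minimum_service_version(min_service_version):
        # A deployment whose oldest nova-compute reports this service
        # version must not be sent compute RPC newer than the result.
        entry = SERVICE_VERSION_HISTORY[min_service_version - OFFSET]
        return entry['compute_rpc']

    print(rpc_cap_for_minimum_service_version(20))  # 4.16
    print(rpc_cap_for_minimum_service_version(21))  # 4.17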
@@ -688,6 +688,16 @@ class VolumeAttachTestsV21(test.NoDBTestCase):
                           self.attachments,
                           body=body)
 
+    def test_swap_volume_for_bdm_not_found(self):
+
+        def fake_swap_volume_for_bdm_not_found(self, context, instance,
+                                               old_volume, new_volume):
+            raise exception.VolumeBDMNotFound(volume_id=FAKE_UUID_C)
+
+        self.assertRaises(webob.exc.HTTPNotFound, self._test_swap,
+                          self.attachments,
+                          fake_func=fake_swap_volume_for_bdm_not_found)
+
 
 class VolumeAttachTestsV249(test.NoDBTestCase):
     validation_error = exception.ValidationError
@@ -2416,7 +2416,14 @@ class _ComputeAPIUnitTestMixIn(object):
     def test_swap_volume_volume_api_usage(self):
         self._test_swap_volume()
 
-    def _test_swap_volume(self, expected_exception=None):
+    def test_swap_volume_volume_api_usage_new_attach_flow(self):
+        self._test_swap_volume(attachment_id=uuids.attachment_id)
+
+    def test_swap_volume_with_swap_volume_error_new_attach_flow(self):
+        self._test_swap_volume(expected_exception=AttributeError,
+                               attachment_id=uuids.attachment_id)
+
+    def _test_swap_volume(self, expected_exception=None, attachment_id=None):
         volumes = self._get_volumes_for_test_swap_volume()
         instance = self._get_instance_for_test_swap_volume()
@@ -2439,19 +2446,45 @@ class _ComputeAPIUnitTestMixIn(object):
             if volumes[volume_id]['status'] == 'attaching':
                 volumes[volume_id]['status'] = 'available'
 
+        def fake_vol_api_attachment_create(context, volume_id, instance_id):
+            self.assertTrue(uuidutils.is_uuid_like(volume_id))
+            self.assertEqual('available', volumes[volume_id]['status'])
+            volumes[volume_id]['status'] = 'reserved'
+            return {'id': uuids.attachment_id}
+
+        def fake_vol_api_attachment_delete(context, attachment_id):
+            self.assertTrue(uuidutils.is_uuid_like(attachment_id))
+            if volumes[uuids.new_volume]['status'] == 'reserved':
+                volumes[uuids.new_volume]['status'] = 'available'
+
         @mock.patch.object(self.compute_api.compute_rpcapi, 'swap_volume',
                            return_value=True)
         @mock.patch.object(self.compute_api.volume_api, 'unreserve_volume',
                            side_effect=fake_vol_api_unreserve)
+        @mock.patch.object(self.compute_api.volume_api, 'attachment_delete',
+                           side_effect=fake_vol_api_attachment_delete)
         @mock.patch.object(self.compute_api.volume_api, 'reserve_volume',
                            side_effect=fake_vol_api_reserve)
+        @mock.patch.object(self.compute_api.volume_api, 'attachment_create',
+                           side_effect=fake_vol_api_attachment_create)
         @mock.patch.object(self.compute_api.volume_api, 'roll_detaching',
                            side_effect=fake_vol_api_roll_detaching)
+        @mock.patch.object(objects.BlockDeviceMapping,
+                           'get_by_volume_and_instance')
         @mock.patch.object(self.compute_api.volume_api, 'begin_detaching',
                            side_effect=fake_vol_api_begin_detaching)
-        def _do_test(mock_begin_detaching, mock_roll_detaching,
-                     mock_reserve_volume, mock_unreserve_volume,
-                     mock_swap_volume):
+        def _do_test(mock_begin_detaching, mock_get_by_volume_and_instance,
+                     mock_roll_detaching, mock_attachment_create,
+                     mock_reserve_volume, mock_attachment_delete,
+                     mock_unreserve_volume, mock_swap_volume):
+            bdm = objects.BlockDeviceMapping(
+                **fake_block_device.FakeDbBlockDeviceDict(
+                    {'no_device': False, 'volume_id': '1', 'boot_index': 0,
+                     'connection_info': 'inf', 'device_name': '/dev/vda',
+                     'source_type': 'volume', 'destination_type': 'volume',
+                     'tag': None, 'attachment_id': attachment_id},
+                    anon=True))
+            mock_get_by_volume_and_instance.return_value = bdm
             if expected_exception:
                 mock_swap_volume.side_effect = AttributeError()
                 self.assertRaises(expected_exception,
@@ -2461,10 +2494,45 @@ class _ComputeAPIUnitTestMixIn(object):
                 self.assertEqual('in-use', volumes[uuids.old_volume]['status'])
                 self.assertEqual('available',
                                  volumes[uuids.new_volume]['status'])
+                # Make assertions about what was called if there was or was not
+                # a Cinder 3.27 style attachment provided.
+                if attachment_id is None:
+                    # Old style attachment, so unreserve was called and
+                    # attachment_delete was not called.
+                    mock_unreserve_volume.assert_called_once_with(
+                        self.context, uuids.new_volume)
+                    mock_attachment_delete.assert_not_called()
+                else:
+                    # New style attachment, so unreserve was not called and
+                    # attachment_delete was called.
+                    mock_unreserve_volume.assert_not_called()
+                    mock_attachment_delete.assert_called_once_with(
+                        self.context, attachment_id)
             else:
                 self.compute_api.swap_volume(self.context, instance,
                                              volumes[uuids.old_volume],
                                              volumes[uuids.new_volume])
+                # Make assertions about what was called if there was or was not
+                # a Cinder 3.27 style attachment provided.
+                if attachment_id is None:
+                    # Old style attachment, so reserve was called and
+                    # attachment_create was not called.
+                    mock_reserve_volume.assert_called_once_with(
+                        self.context, uuids.new_volume)
+                    mock_attachment_create.assert_not_called()
+                else:
+                    # New style attachment, so reserve was not called and
+                    # attachment_create was called.
+                    mock_reserve_volume.assert_not_called()
+                    mock_attachment_create.assert_called_once_with(
+                        self.context, uuids.new_volume, instance.uuid)
+
+                # Assert the call to the rpcapi.
+                mock_swap_volume.assert_called_once_with(
+                    self.context, instance=instance,
+                    old_volume_id=uuids.old_volume,
+                    new_volume_id=uuids.new_volume,
+                    new_attachment_id=attachment_id)
 
         _do_test()
@@ -1870,7 +1870,9 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
             {'device_name': '/dev/vdb', 'source_type': 'volume',
              'destination_type': 'volume',
              'instance_uuid': uuids.instance,
-             'connection_info': '{"foo": "bar"}'})
+             'connection_info': '{"foo": "bar"}',
+             'volume_id': uuids.old_volume,
+             'attachment_id': None})
 
         def fake_vol_api_roll_detaching(context, volume_id):
             self.assertTrue(uuidutils.is_uuid_like(volume_id))
@@ -2014,7 +2016,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
              'destination_type': 'volume',
              'instance_uuid': uuids.instance,
              'delete_on_termination': True,
-             'connection_info': '{"foo": "bar"}'})
+             'connection_info': '{"foo": "bar"}',
+             'attachment_id': None})
         comp_ret = {'save_volume_id': old_volume_id}
         new_info = {"foo": "bar", "serial": old_volume_id}
         swap_volume_mock.return_value = (comp_ret, new_info)
@@ -2034,6 +2037,261 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         update_bdm_mock.assert_called_once_with(mock.ANY, mock.ANY,
                                                 update_values, legacy=False)
 
+    @mock.patch.object(compute_utils, 'notify_about_volume_swap')
+    @mock.patch.object(objects.BlockDeviceMapping,
+                       'get_by_volume_and_instance')
+    @mock.patch('nova.volume.cinder.API.get')
+    @mock.patch('nova.volume.cinder.API.attachment_update',
+                return_value=mock.Mock(connection_info={}))
+    @mock.patch('nova.volume.cinder.API.attachment_delete')
+    @mock.patch('nova.volume.cinder.API.migrate_volume_completion',
+                return_value={'save_volume_id': uuids.old_volume_id})
+    def test_swap_volume_with_new_attachment_id_cinder_migrate_true(
+            self, migrate_volume_completion, attachment_delete,
+            attachment_update, get_volume, get_bdm, notify_about_volume_swap):
+        """Tests a swap volume operation with a new style volume attachment
+        passed in from the compute API, and the case that Cinder initiated
+        the swap volume because of a volume retype situation. This is a happy
+        path test. Since it is a retype there is no volume size change.
+        """
+        bdm = objects.BlockDeviceMapping(
+            volume_id=uuids.old_volume_id, device_name='/dev/vda',
+            attachment_id=uuids.old_attachment_id,
+            connection_info='{"data": {}}', volume_size=1)
+        old_volume = {
+            'id': uuids.old_volume_id, 'size': 1, 'status': 'retyping'
+        }
+        new_volume = {
+            'id': uuids.new_volume_id, 'size': 1, 'status': 'reserved'
+        }
+        get_bdm.return_value = bdm
+        get_volume.side_effect = (old_volume, new_volume)
+        instance = fake_instance.fake_instance_obj(self.context)
+        with test.nested(
+            mock.patch.object(self.context, 'elevated',
+                              return_value=self.context),
+            mock.patch.object(self.compute.driver, 'get_volume_connector',
+                              return_value=mock.sentinel.connector),
+            mock.patch.object(bdm, 'save')
+        ) as (
+            mock_elevated, mock_get_volume_connector, mock_save
+        ):
+            self.compute.swap_volume(
+                self.context, uuids.old_volume_id, uuids.new_volume_id,
+                instance, uuids.new_attachment_id)
+            # Assert the expected calls.
+            get_bdm.assert_called_once_with(
+                self.context, uuids.old_volume_id, instance.uuid)
+            # We updated the new attachment with the host connector.
+            attachment_update.assert_called_once_with(
+                self.context, uuids.new_attachment_id, mock.sentinel.connector)
+            # After a successful swap volume, we deleted the old attachment.
+            attachment_delete.assert_called_once_with(
+                self.context, uuids.old_attachment_id)
+            # After a successful swap volume, we tell Cinder so it can complete
+            # the retype operation.
+            migrate_volume_completion.assert_called_once_with(
+                self.context, uuids.old_volume_id, uuids.new_volume_id,
+                error=False)
+            # The BDM should have been updated. Since it's a retype, the old
+            # volume ID is returned from Cinder so that's what goes into the
+            # BDM but the new attachment ID is saved.
+            mock_save.assert_called_once_with()
+            self.assertEqual(uuids.old_volume_id, bdm.volume_id)
+            self.assertEqual(uuids.new_attachment_id, bdm.attachment_id)
+            self.assertEqual(1, bdm.volume_size)
+            self.assertEqual(uuids.old_volume_id,
+                             jsonutils.loads(bdm.connection_info)['serial'])
+
+    @mock.patch.object(compute_utils, 'notify_about_volume_swap')
+    @mock.patch.object(objects.BlockDeviceMapping,
+                       'get_by_volume_and_instance')
+    @mock.patch('nova.volume.cinder.API.get')
+    @mock.patch('nova.volume.cinder.API.attachment_update',
+                return_value=mock.Mock(connection_info={}))
+    @mock.patch('nova.volume.cinder.API.attachment_delete')
+    @mock.patch('nova.volume.cinder.API.migrate_volume_completion')
+    def test_swap_volume_with_new_attachment_id_cinder_migrate_false(
+            self, migrate_volume_completion, attachment_delete,
+            attachment_update, get_volume, get_bdm, notify_about_volume_swap):
+        """Tests a swap volume operation with a new style volume attachment
+        passed in from the compute API, and the case that Cinder did not
+        initiate the swap volume. This is a happy path test. Since it is not a
+        retype we also change the size.
+        """
+        bdm = objects.BlockDeviceMapping(
+            volume_id=uuids.old_volume_id, device_name='/dev/vda',
+            attachment_id=uuids.old_attachment_id,
+            connection_info='{"data": {}}')
+        old_volume = {
+            'id': uuids.old_volume_id, 'size': 1, 'status': 'detaching'
+        }
+        new_volume = {
+            'id': uuids.new_volume_id, 'size': 2, 'status': 'reserved'
+        }
+        get_bdm.return_value = bdm
+        get_volume.side_effect = (old_volume, new_volume)
+        instance = fake_instance.fake_instance_obj(self.context)
+        with test.nested(
+            mock.patch.object(self.context, 'elevated',
+                              return_value=self.context),
+            mock.patch.object(self.compute.driver, 'get_volume_connector',
+                              return_value=mock.sentinel.connector),
+            mock.patch.object(bdm, 'save')
+        ) as (
+            mock_elevated, mock_get_volume_connector, mock_save
+        ):
+            self.compute.swap_volume(
+                self.context, uuids.old_volume_id, uuids.new_volume_id,
+                instance, uuids.new_attachment_id)
+            # Assert the expected calls.
+            get_bdm.assert_called_once_with(
+                self.context, uuids.old_volume_id, instance.uuid)
+            # We updated the new attachment with the host connector.
+            attachment_update.assert_called_once_with(
+                self.context, uuids.new_attachment_id, mock.sentinel.connector)
+            # After a successful swap volume, we deleted the old attachment.
+            attachment_delete.assert_called_once_with(
+                self.context, uuids.old_attachment_id)
+            # After a successful swap volume, since it was not a
+            # Cinder-initiated call, we don't call migrate_volume_completion.
+            migrate_volume_completion.assert_not_called()
+            # The BDM should have been updated. Since it's a not a retype, the
+            # volume_id is now the new volume ID.
+            mock_save.assert_called_once_with()
+            self.assertEqual(uuids.new_volume_id, bdm.volume_id)
+            self.assertEqual(uuids.new_attachment_id, bdm.attachment_id)
+            self.assertEqual(2, bdm.volume_size)
+            self.assertEqual(uuids.new_volume_id,
+                             jsonutils.loads(bdm.connection_info)['serial'])
+
+    @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
+    @mock.patch.object(compute_utils, 'notify_about_volume_swap')
+    @mock.patch.object(objects.BlockDeviceMapping,
+                       'get_by_volume_and_instance')
+    @mock.patch('nova.volume.cinder.API.get')
+    @mock.patch('nova.volume.cinder.API.attachment_update',
+                side_effect=exception.VolumeAttachmentNotFound(
+                    attachment_id=uuids.new_attachment_id))
+    @mock.patch('nova.volume.cinder.API.roll_detaching')
+    @mock.patch('nova.volume.cinder.API.attachment_delete')
+    @mock.patch('nova.volume.cinder.API.migrate_volume_completion')
+    def test_swap_volume_with_new_attachment_id_attachment_update_fails(
+            self, migrate_volume_completion, attachment_delete, roll_detaching,
+            attachment_update, get_volume, get_bdm, notify_about_volume_swap,
+            add_instance_fault_from_exc):
+        """Tests a swap volume operation with a new style volume attachment
+        passed in from the compute API, and the case that Cinder initiated
+        the swap volume because of a volume migrate situation. This is a
+        negative test where attachment_update fails.
+        """
+        bdm = objects.BlockDeviceMapping(
+            volume_id=uuids.old_volume_id, device_name='/dev/vda',
+            attachment_id=uuids.old_attachment_id,
+            connection_info='{"data": {}}')
+        old_volume = {
+            'id': uuids.old_volume_id, 'size': 1, 'status': 'migrating'
+        }
+        new_volume = {
+            'id': uuids.new_volume_id, 'size': 1, 'status': 'reserved'
+        }
+        get_bdm.return_value = bdm
+        get_volume.side_effect = (old_volume, new_volume)
+        instance = fake_instance.fake_instance_obj(self.context)
+        with test.nested(
+            mock.patch.object(self.context, 'elevated',
+                              return_value=self.context),
+            mock.patch.object(self.compute.driver, 'get_volume_connector',
+                              return_value=mock.sentinel.connector)
+        ) as (
+            mock_elevated, mock_get_volume_connector
+        ):
+            self.assertRaises(
+                exception.VolumeAttachmentNotFound, self.compute.swap_volume,
+                self.context, uuids.old_volume_id, uuids.new_volume_id,
+                instance, uuids.new_attachment_id)
+            # Assert the expected calls.
+            get_bdm.assert_called_once_with(
+                self.context, uuids.old_volume_id, instance.uuid)
+            # We tried to update the new attachment with the host connector.
+            attachment_update.assert_called_once_with(
+                self.context, uuids.new_attachment_id, mock.sentinel.connector)
+            # After a failure, we rollback the detaching status of the old
+            # volume.
+            roll_detaching.assert_called_once_with(
+                self.context, uuids.old_volume_id)
+            # After a failure, we deleted the new attachment.
+            attachment_delete.assert_called_once_with(
+                self.context, uuids.new_attachment_id)
+            # After a failure for a Cinder-initiated swap volume, we called
+            # migrate_volume_completion to let Cinder know things blew up.
+            migrate_volume_completion.assert_called_once_with(
+                self.context, uuids.old_volume_id, uuids.new_volume_id,
+                error=True)
+
+    @mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
+    @mock.patch.object(compute_utils, 'notify_about_volume_swap')
+    @mock.patch.object(objects.BlockDeviceMapping,
+                       'get_by_volume_and_instance')
+    @mock.patch('nova.volume.cinder.API.get')
+    @mock.patch('nova.volume.cinder.API.attachment_update',
+                return_value=mock.Mock(connection_info={}))
+    @mock.patch('nova.volume.cinder.API.roll_detaching')
+    @mock.patch('nova.volume.cinder.API.attachment_delete')
+    @mock.patch('nova.volume.cinder.API.migrate_volume_completion')
+    def test_swap_volume_with_new_attachment_id_driver_swap_fails(
+            self, migrate_volume_completion, attachment_delete, roll_detaching,
+            attachment_update, get_volume, get_bdm, notify_about_volume_swap,
+            add_instance_fault_from_exc):
+        """Tests a swap volume operation with a new style volume attachment
+        passed in from the compute API, and the case that Cinder did not
+        initiate the swap volume. This is a negative test where the compute
+        driver swap_volume method fails.
+        """
+        bdm = objects.BlockDeviceMapping(
+            volume_id=uuids.old_volume_id, device_name='/dev/vda',
+            attachment_id=uuids.old_attachment_id,
+            connection_info='{"data": {}}')
+        old_volume = {
+            'id': uuids.old_volume_id, 'size': 1, 'status': 'detaching'
+        }
+        new_volume = {
+            'id': uuids.new_volume_id, 'size': 2, 'status': 'reserved'
+        }
+        get_bdm.return_value = bdm
+        get_volume.side_effect = (old_volume, new_volume)
+        instance = fake_instance.fake_instance_obj(self.context)
+        with test.nested(
+            mock.patch.object(self.context, 'elevated',
+                              return_value=self.context),
+            mock.patch.object(self.compute.driver, 'get_volume_connector',
+                              return_value=mock.sentinel.connector),
+            mock.patch.object(self.compute.driver, 'swap_volume',
+                              side_effect=test.TestingException('yikes'))
+        ) as (
+            mock_elevated, mock_get_volume_connector, mock_save
+        ):
+            self.assertRaises(
+                test.TestingException, self.compute.swap_volume,
+                self.context, uuids.old_volume_id, uuids.new_volume_id,
+                instance, uuids.new_attachment_id)
+            # Assert the expected calls.
+            get_bdm.assert_called_once_with(
+                self.context, uuids.old_volume_id, instance.uuid)
+            # We updated the new attachment with the host connector.
+            attachment_update.assert_called_once_with(
+                self.context, uuids.new_attachment_id, mock.sentinel.connector)
+            # After a failure, we rollback the detaching status of the old
+            # volume.
+            roll_detaching.assert_called_once_with(
+                self.context, uuids.old_volume_id)
+            # After a failed swap volume, we deleted the new attachment.
+            attachment_delete.assert_called_once_with(
+                self.context, uuids.new_attachment_id)
+            # After a failed swap volume, since it was not a
+            # Cinder-initiated call, we don't call migrate_volume_completion.
+            migrate_volume_completion.assert_not_called()
+
     @mock.patch.object(fake_driver.FakeDriver,
                        'check_can_live_migrate_source')
     @mock.patch.object(manager.ComputeManager,
@@ -30,6 +30,7 @@ from nova import test
 from nova.tests.unit import fake_block_device
 from nova.tests.unit import fake_flavor
 from nova.tests.unit import fake_instance
+from nova.tests import uuidsentinel as uuids
 
 CONF = nova.conf.CONF
@@ -445,7 +446,29 @@ class ComputeRpcAPITestCase(test.NoDBTestCase):
     def test_swap_volume(self):
         self._test_compute_api('swap_volume', 'cast',
                 instance=self.fake_instance_obj, old_volume_id='oldid',
-                new_volume_id='newid')
+                new_volume_id='newid', new_attachment_id=uuids.attachment_id,
+                version='4.17')
+
+    def test_swap_volume_cannot_send_version_4_17(self):
+        """Tests that if the RPC client cannot send version 4.17 we drop back
+        to version 4.0 and don't send the new_attachment_id kwarg.
+        """
+        rpcapi = compute_rpcapi.ComputeAPI()
+        fake_context = mock.Mock()
+        fake_client = mock.Mock()
+        fake_client.can_send_version.return_value = False
+        fake_client.prepare.return_value = fake_context
+        with mock.patch.object(rpcapi.router, 'client',
+                               return_value=fake_client):
+            rpcapi.swap_volume(self.context, self.fake_instance_obj,
+                               uuids.old_volume_id, uuids.new_volume_id,
+                               uuids.new_attachment_id)
+        fake_client.prepare.assert_called_once_with(
+            server=self.fake_instance_obj.host, version='4.0')
+        fake_context.cast.assert_called_once_with(
+            self.context, 'swap_volume', instance=self.fake_instance_obj,
+            old_volume_id=uuids.old_volume_id,
+            new_volume_id=uuids.new_volume_id)
 
     def test_restore_instance(self):
         self._test_compute_api('restore_instance', 'cast',