Fix rebuild with volumes attached

This patch assumes that the correct behavior for instance rebuild is to
maintain attached volumes across a rebuild operation.

Two important changes are:

1) Detaching all volumes during a rebuild so that they won't be 'in-use'
   when prep_block_devices is called to reattach them.

2) xenapi: Allowing additional volumes, not just the root volume, to be
   attached before boot. To handle this, we cycle through all
   block-device-mappings, not just the root device, create each VDI, and
   later create each VBD (sketched just below).
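
For illustration, the `vdis` dict that change (2) produces might look like
this (a minimal sketch; the device names and UUIDs are hypothetical, while
the 'root'/mount-device keying and the value shape follow the new
get_vdis_for_instance code in the diff below):

    # Instance booted from a volume at /dev/xvda, with one extra data volume
    # attached at /dev/xvdb:
    vdis = {
        'root':      {'uuid': 'vdi-uuid-aaa', 'file': None, 'osvol': True},
        '/dev/xvdb': {'uuid': 'vdi-uuid-bbb', 'file': None, 'osvol': True},
    }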

Small changes include:

* Using `connection_data` instead of `dev_params` (to match other parts of
  the code base)

* Renaming `get_vdis_for_boot_from_vol` to `get_vdi_uuid_for_volume` to
  reflect its simpler, more general semantics.

Fixes bug 1071547

Change-Id: Ie54a16be4bae2a718ed7d506f32777d0847b9089
Author: Rick Harris
Date:   2013-01-29 07:26:48 +00:00
Commit: 8705f49a4a (parent 045e1190f2)

5 changed files with 92 additions and 64 deletions

nova/block_device.py

@@ -149,23 +149,20 @@ def match_device(device):
     return match.groups()
 
-def volume_in_mapping(mount_device, block_device_info, strip=strip_dev):
-    # FIXME(sirp): xen uses strip_prefix to be mountpoint agnostic. There is
-    # probably a better way to handle this so that strip_dev can be used
-    # exclusively.
-    block_device_list = [strip(vol['mount_device'])
+def volume_in_mapping(mount_device, block_device_info):
+    block_device_list = [strip_dev(vol['mount_device'])
                          for vol in
                          driver.block_device_info_get_mapping(
                              block_device_info)]
     swap = driver.block_device_info_get_swap(block_device_info)
     if driver.swap_is_usable(swap):
-        block_device_list.append(strip(swap['device_name']))
-    block_device_list += [strip(ephemeral['device_name'])
+        block_device_list.append(strip_dev(swap['device_name']))
+    block_device_list += [strip_dev(ephemeral['device_name'])
                           for ephemeral in
                           driver.block_device_info_get_ephemerals(
                               block_device_info)]
     LOG.debug(_("block_device_list %s"), block_device_list)
-    return strip(mount_device) in block_device_list
+    return strip_dev(mount_device) in block_device_list
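
For context on the removed `strip` parameter: strip_dev removes only the
leading '/dev/', while strip_prefix also strips the disk-type prefix, which
is what the xenapi code used to stay mountpoint agnostic. A rough
reimplementation for illustration only (the real helpers live in
nova/block_device.py and their exact regexes may differ):

    import re

    def strip_dev(device_name):
        # Remove a leading '/dev/': '/dev/xvdb' -> 'xvdb'
        return re.sub('^/dev/', '', device_name) if device_name else device_name

    def strip_prefix(device_name):
        # Also remove the disk-type prefix, so '/dev/xvdb', '/dev/sdb' and
        # '/dev/vdb' all reduce to 'b'
        return re.sub('^((x?v|s)d)', '', strip_dev(device_name))

    assert strip_dev('/dev/xvdb') == 'xvdb'
    assert strip_prefix('/dev/xvdb') == strip_prefix('/dev/sdb') == 'b'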

nova/compute/manager.py

@@ -1366,18 +1366,18 @@ class ComputeManager(manager.SchedulerDependentManager):
                 block_device_mapping_get_all_by_instance(
                     context, instance)
 
-        if recreate:
-            for bdm in self._get_volume_bdms(bdms):
-                volume = self.volume_api.get(context, bdm['volume_id'])
-                # We can't run volume disconnect on source because
-                # the host is down. Just marking volume as detached
-                # in db, anyway the zombie instance going to be deleted
-                # from source during init_host when host comes back
-                self.volume_api.detach(context.elevated(), volume)
-        else:
+        # NOTE(sirp): this detach is necessary b/c we will reattach the
+        # volumes in _prep_block_devices below.
+        for bdm in self._get_volume_bdms(bdms):
+            volume = self.volume_api.get(context, bdm['volume_id'])
+            self.volume_api.detach(context, volume)
+
+        if not recreate:
+            block_device_info = self._get_volume_block_device_info(
+                self._get_volume_bdms(bdms))
             self.driver.destroy(instance,
-                                self._legacy_nw_info(network_info))
+                                self._legacy_nw_info(network_info),
+                                block_device_info=block_device_info)
 
         instance = self._instance_update(
             context, instance['uuid'],
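
Why the unconditional detach matters: a volume still marked 'in-use' cannot
be attached again, so the reattach in _prep_block_devices would fail. A toy
model of that state machine (illustration only, not nova or Cinder code):

    class ToyVolume(object):
        # Minimal stand-in for a volume's attach state machine
        def __init__(self):
            self.status = 'in-use'  # still attached to the old instance

        def detach(self):
            self.status = 'available'

        def attach(self):
            if self.status != 'available':
                raise RuntimeError('volume is %r, not available' % self.status)
            self.status = 'in-use'

    vol = ToyVolume()
    vol.detach()  # what the rebuild path now does for every volume BDM
    vol.attach()  # what _prep_block_devices can then do successfully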

nova/tests/test_xenapi.py

@@ -14,7 +14,7 @@ XENSM_TYPE = 'xensm'
 ISCSI_TYPE = 'iscsi'
 
-def get_fake_dev_params(sr_type):
+def get_fake_connection_data(sr_type):
     fakes = {XENSM_TYPE: {'sr_uuid': 'falseSR',
                           'name_label': 'fake_storage',
                           'name_description': 'test purposes',
@@ -73,16 +73,16 @@ class GetInstanceForVdisForSrTestCase(stubs.XenAPITestBase):
         self.assertEquals([], result)
 
-    def test_get_vdis_for_boot_from_vol_with_sr_uuid(self):
-        dev_params = get_fake_dev_params(XENSM_TYPE)
+    def test_get_vdi_uuid_for_volume_with_sr_uuid(self):
+        connection_data = get_fake_connection_data(XENSM_TYPE)
         stubs.stubout_session(self.stubs, fake.SessionBase)
         driver = xenapi_conn.XenAPIDriver(False)
 
-        result = vm_utils.get_vdis_for_boot_from_vol(driver._session,
-                                                     dev_params)
-        self.assertEquals(result['root']['uuid'], 'falseVDI')
+        vdi_uuid = vm_utils.get_vdi_uuid_for_volume(
+            driver._session, connection_data)
+        self.assertEquals(vdi_uuid, 'falseVDI')
 
-    def test_get_vdis_for_boot_from_vol_failure(self):
+    def test_get_vdi_uuid_for_volume_failure(self):
         stubs.stubout_session(self.stubs, fake.SessionBase)
         driver = xenapi_conn.XenAPIDriver(False)
@@ -90,19 +90,19 @@ class GetInstanceForVdisForSrTestCase(stubs.XenAPITestBase):
             return None
 
         self.stubs.Set(volume_utils, 'introduce_sr', bad_introduce_sr)
-        dev_params = get_fake_dev_params(XENSM_TYPE)
+        connection_data = get_fake_connection_data(XENSM_TYPE)
         self.assertRaises(exception.NovaException,
-                          vm_utils.get_vdis_for_boot_from_vol,
-                          driver._session, dev_params)
+                          vm_utils.get_vdi_uuid_for_volume,
+                          driver._session, connection_data)
 
-    def test_get_vdis_for_boot_from_iscsi_vol_missing_sr_uuid(self):
-        dev_params = get_fake_dev_params(ISCSI_TYPE)
+    def test_get_vdi_uuid_for_volume_from_iscsi_vol_missing_sr_uuid(self):
+        connection_data = get_fake_connection_data(ISCSI_TYPE)
         stubs.stubout_session(self.stubs, fake.SessionBase)
         driver = xenapi_conn.XenAPIDriver(False)
 
-        result = vm_utils.get_vdis_for_boot_from_vol(driver._session,
-                                                     dev_params)
-        self.assertNotEquals(result['root']['uuid'], None)
+        vdi_uuid = vm_utils.get_vdi_uuid_for_volume(
+            driver._session, connection_data)
+        self.assertNotEquals(vdi_uuid, None)
 
 class VMRefOrRaiseVMFoundTestCase(test.TestCase):

nova/virt/xenapi/vm_utils.py

@@ -460,50 +460,66 @@ def create_vdi(session, sr_ref, instance, name_label, disk_type, virtual_size,
     return vdi_ref
 
-def get_vdis_for_boot_from_vol(session, dev_params):
-    vdis = {}
-    sr_uuid, label, sr_params = volume_utils.parse_sr_info(dev_params)
+def get_vdi_uuid_for_volume(session, connection_data):
+    sr_uuid, label, sr_params = volume_utils.parse_sr_info(connection_data)
     sr_ref = volume_utils.find_sr_by_uuid(session, sr_uuid)
+
     # Try introducing SR if it is not present
     if not sr_ref:
         sr_ref = volume_utils.introduce_sr(session, sr_uuid, label, sr_params)
+
     if sr_ref is None:
         raise exception.NovaException(_('SR not present and could not be '
                                         'introduced'))
-    else:
-        if 'vdi_uuid' in dev_params:
-            session.call_xenapi("SR.scan", sr_ref)
-            vdis = {'root': dict(uuid=dev_params['vdi_uuid'],
-                    file=None, osvol=True)}
-        else:
-            try:
-                vdi_ref = volume_utils.introduce_vdi(session, sr_ref)
-                vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
-                vdis = {'root': dict(uuid=vdi_rec['uuid'],
-                        file=None, osvol=True)}
-            except volume_utils.StorageError, exc:
-                LOG.exception(exc)
-                volume_utils.forget_sr(session, sr_uuid)
-    return vdis
+
+    vdi_uuid = None
+
+    if 'vdi_uuid' in connection_data:
+        session.call_xenapi("SR.scan", sr_ref)
+        vdi_uuid = connection_data['vdi_uuid']
+    else:
+        try:
+            vdi_ref = volume_utils.introduce_vdi(session, sr_ref)
+            vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
+            vdi_uuid = vdi_rec['uuid']
+        except volume_utils.StorageError, exc:
+            LOG.exception(exc)
+            volume_utils.forget_sr(session, sr_uuid)
+
+    return vdi_uuid
 
 def get_vdis_for_instance(context, session, instance, name_label, image,
                           image_type, block_device_info=None):
+    vdis = {}
+
     if block_device_info:
         LOG.debug(_("block device info: %s"), block_device_info)
-        rootdev = block_device_info['root_device_name']
-        if block_device.volume_in_mapping(rootdev, block_device_info,
-                                          strip=block_device.strip_prefix):
-            # call function to return the vdi in connection info of block
-            # device.
-            # make it a point to return from here.
-            bdm_root_dev = block_device_info['block_device_mapping'][0]
-            dev_params = bdm_root_dev['connection_info']['data']
-            LOG.debug(dev_params)
-            return get_vdis_for_boot_from_vol(session, dev_params)
-    return _create_image(context, session, instance, name_label, image,
-                         image_type)
+        root_device_name = block_device_info['root_device_name']
+
+        for bdm in block_device_info['block_device_mapping']:
+            if (block_device.strip_prefix(bdm['mount_device']) ==
+                    block_device.strip_prefix(root_device_name)):
+                # If we're a root-device, record that fact so we don't download
+                # a root image via Glance
+                type_ = 'root'
+            else:
+                # Otherwise, use mount_device as `type_` so that we have easy
+                # access to it in _attach_disks to create the VBD
+                type_ = bdm['mount_device']
+
+            connection_data = bdm['connection_info']['data']
+            vdi_uuid = get_vdi_uuid_for_volume(session, connection_data)
+            if vdi_uuid:
+                vdis[type_] = dict(uuid=vdi_uuid, file=None, osvol=True)
+
+    # If we didn't get a root VDI from volumes, then use the Glance image as
+    # the root device
+    if 'root' not in vdis:
+        create_image_vdis = _create_image(
+            context, session, instance, name_label, image, image_type)
+        vdis.update(create_image_vdis)
+
+    return vdis
 
 @contextlib.contextmanager
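
Note how the strip_prefix comparison keeps the root-device check mountpoint
agnostic; a self-contained illustration (the helper here is a sketch of the
nova/block_device.py one, with hypothetical device names):

    import re

    def strip_prefix(device_name):
        # Same idea as the nova/block_device.py helper (sketch, not verbatim)
        return re.sub('^/dev/((x?v|s)d)?', '', device_name)

    # Both reduce to the bare drive letter 'a', so the bdm is treated as root
    assert strip_prefix('/dev/vda') == strip_prefix('/dev/xvda') == 'a'
    # A data volume keeps a distinct letter and is keyed by its mount_device
    assert strip_prefix('/dev/xvdb') == 'b'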

nova/virt/xenapi/vmops.py

@@ -26,6 +26,7 @@ import time
 from eventlet import greenthread
 import netaddr
 
+from nova import block_device
 from nova.compute import api as compute
 from nova.compute import power_state
 from nova.compute import task_states
@@ -503,8 +504,9 @@
         ctx = nova_context.get_admin_context()
         instance_type = instance['instance_type']
 
-        # DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
+        # Attach (required) root disk
         if disk_image_type == vm_utils.ImageType.DISK_ISO:
+            # DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
             LOG.debug(_("Detected ISO image type, creating blank VM "
                         "for install"), instance=instance)
@@ -532,6 +534,19 @@
                                 DEVICE_ROOT, bootable=True,
                                 osvol=root_vdi.get('osvol'))
 
+        # Attach (optional) additional block-devices
+        for type_, vdi_info in vdis.items():
+            # Additional block-devices for boot use their device-name as the
+            # type.
+            if not type_.startswith('/dev'):
+                continue
+
+            # Convert device name to userdevice number, e.g. /dev/xvdb -> 1
+            userdevice = ord(block_device.strip_prefix(type_)) - ord('a')
+            vm_utils.create_vbd(self._session, vm_ref, vdi_info['ref'],
+                                userdevice, bootable=False,
+                                osvol=vdi_info.get('osvol'))
+
         # Attach (optional) swap disk
         swap_mb = instance_type['swap']
         if swap_mb:
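
As a worked example of the userdevice conversion above (a standalone sketch;
the real code inlines this arithmetic and uses block_device.strip_prefix):

    import re

    def userdevice_for(mount_device):
        # '/dev/xvdb' -> 'b' -> 1, '/dev/xvdc' -> 'c' -> 2
        letter = re.sub('^/dev/(x?v|s)d', '', mount_device)
        return ord(letter) - ord('a')

    assert userdevice_for('/dev/xvdb') == 1
    assert userdevice_for('/dev/xvdc') == 2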