Code cleanup for rebuild block device mapping
* Removes the extra call to get block-device-mappings when detaching during recreate.
* Untangles network handling from block-device handling in rebuild.
* DRYs up the evacuate host tests.
* temporary_mutation now supports dicts as well as objects.
* Makes the instance recreate exceptions more specific to aid testing (see the short illustration below).

Change-Id: I242f9f6a7d329f27b06b7e81b9a418e01682c82e
parent 05751bb786
commit 8ab6a0fd9e
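As a condensed illustration of the "more specific exceptions" point above (taken from the test changes further down in this diff, shown here only for orientation):

    # before: the test could only assert the generic failure
    self.assertRaises(exception.Invalid,
                      self.compute.run_instance,
                      self.context, instance=instance)
    # after: the specific failure mode is asserted
    self.assertRaises(exception.InstanceExists,
                      self.compute.run_instance,
                      self.context, instance=instance)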
@@ -641,13 +641,15 @@ class ComputeManager(manager.SchedulerDependentManager):
'delete_on_termination': bdm['delete_on_termination']}
block_device_mapping.append(bdmap)

return {
block_device_info = {
'root_device_name': instance['root_device_name'],
'swap': swap,
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping
}

return block_device_info

def _run_instance(self, context, request_spec,
filter_properties, requested_networks, injected_files,
admin_password, is_first_time, node, instance):
@@ -655,7 +657,7 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()

try:
self._check_instance_not_already_created(context, instance)
self._check_instance_exists(context, instance)
image_meta = self._check_image_size(context, instance)

if node is None:
@@ -839,11 +841,10 @@ class ComputeManager(manager.SchedulerDependentManager):
**update_info)
return instance

def _check_instance_not_already_created(self, context, instance):
def _check_instance_exists(self, context, instance):
"""Ensure an instance with the same name is not already present."""
if self.driver.instance_exists(instance['name']):
_msg = _("Instance has already been created")
raise exception.Invalid(_msg)
raise exception.InstanceExists(name=instance['name'])

def _check_image_size(self, context, instance):
"""Ensure image is smaller than the maximum size allowed by the
@@ -1287,6 +1288,7 @@ class ComputeManager(manager.SchedulerDependentManager):
:param injected_files: Files to inject
:param new_pass: password to set on rebuilt instance
:param orig_sys_metadata: instance system metadata from pre-rebuild
:param bdms: block-device-mappings to use for rebuild
:param recreate: True if instance should be recreated with same disk
:param on_shared_storage: True if instance files on shared storage
"""
@@ -1298,39 +1300,28 @@ class ComputeManager(manager.SchedulerDependentManager):
instance=instance)

if recreate:

if not self.driver.capabilities["supports_recreate"]:
# if driver doesn't support recreate return with failure
_msg = _('instance recreate is not implemented '
'by this driver.')
raise exception.InstanceRecreateNotSupported

LOG.warn(_msg, instance=instance)
self._instance_update(context,
instance['uuid'],
task_state=None,
expected_task_state=task_states.
REBUILDING)
raise exception.Invalid(_msg)
self._check_instance_exists(context, instance)

self._check_instance_not_already_created(context, instance)

# to cover case when admin expects that instance files are on
# To cover case when admin expects that instance files are on
# shared storage, but not accessible and vice versa
if on_shared_storage != self.driver.instance_on_disk(instance):
_msg = _("Invalid state of instance files on "
"shared storage")
raise exception.Invalid(_msg)
raise exception.InvalidSharedStorage(
_("Invalid state of instance files on shared"
" storage"))

if on_shared_storage:
LOG.info(_('disk on shared storage,'
'recreating using existing disk'))
LOG.info(_('disk on shared storage, recreating using'
' existing disk'))
else:
image_ref = orig_image_ref = instance['image_ref']
LOG.info(_("disk not on shared storage"
"rebuilding from: '%s'") % str(image_ref))
LOG.info(_("disk not on shared storagerebuilding from:"
" '%s'") % str(image_ref))

instance = self._instance_update(context, instance['uuid'],
host=self.host)
instance = self._instance_update(
context, instance['uuid'], host=self.host)

if image_ref:
image_meta = _get_image_meta(context, image_ref)
@@ -1351,15 +1342,25 @@ class ComputeManager(manager.SchedulerDependentManager):
self._notify_about_instance_usage(context, instance,
"rebuild.start", extra_usage_info=extra_usage_info)

current_power_state = self._get_power_state(context, instance)
instance = self._instance_update(context, instance['uuid'],
power_state=current_power_state,
instance = self._instance_update(
context, instance['uuid'],
power_state=self._get_power_state(context, instance),
task_state=task_states.REBUILDING,
expected_task_state=task_states.REBUILDING)

if recreate:
# Detaching volumes.
for bdm in self._get_instance_volume_bdms(context, instance):
self.network_api.setup_networks_on_host(
context, instance, self.host)

network_info = self._get_instance_nw_info(context, instance)

if bdms is None:
bdms = self.conductor_api.\
block_device_mapping_get_all_by_instance(
context, instance)

if recreate:
for bdm in self._get_volume_bdms(bdms):
volume = self.volume_api.get(context, bdm['volume_id'])

# We can't run volume disconnect on source because
@@ -1367,48 +1368,38 @@ class ComputeManager(manager.SchedulerDependentManager):
# in db, anyway the zombie instance going to be deleted
# from source during init_host when host comes back
self.volume_api.detach(context.elevated(), volume)

self.network_api.setup_networks_on_host(context,
instance, self.host)
else:
network_info = self._get_instance_nw_info(context, instance)
self.driver.destroy(instance,
self._legacy_nw_info(network_info))

instance = self._instance_update(context, instance['uuid'],
instance = self._instance_update(
context, instance['uuid'],
task_state=task_states.REBUILD_BLOCK_DEVICE_MAPPING,
expected_task_state=task_states.REBUILDING)

block_device_info = self._setup_block_device_mapping(
context, instance, bdms)

instance['injected_files'] = injected_files
network_info = self._get_instance_nw_info(context, instance)
if bdms is None:
capi = self.conductor_api
bdms = capi.block_device_mapping_get_all_by_instance(
context, instance)
device_info = self._setup_block_device_mapping(context, instance,
bdms)

expected_task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING
instance = self._instance_update(context, instance['uuid'],
instance = self._instance_update(
context, instance['uuid'],
task_state=task_states.REBUILD_SPAWNING,
expected_task_state=expected_task_state)

admin_password = new_pass
expected_task_state=
task_states.REBUILD_BLOCK_DEVICE_MAPPING)

self.driver.spawn(context, instance, image_meta,
[], admin_password,
self._legacy_nw_info(network_info),
device_info)
[], new_pass,
network_info=self._legacy_nw_info(network_info),
block_device_info=block_device_info)

current_power_state = self._get_power_state(context, instance)
instance = self._instance_update(context,
instance['uuid'],
power_state=current_power_state,
vm_state=vm_states.ACTIVE,
task_state=None,
expected_task_state=task_states.
REBUILD_SPAWNING,
launched_at=timeutils.utcnow())
instance = self._instance_update(
context, instance['uuid'],
power_state=self._get_power_state(context, instance),
vm_state=vm_states.ACTIVE,
task_state=None,
expected_task_state=task_states.REBUILD_SPAWNING,
launched_at=timeutils.utcnow())

LOG.info(_("bringing vm to original state: '%s'") % orig_vm_state)
if orig_vm_state == vm_states.STOPPED:
@@ -1120,3 +1120,7 @@ class CryptoCAFileNotFound(FileNotFound):

class CryptoCRLFileNotFound(FileNotFound):
message = _("The CRL file for %(project)s could not be found")


class InstanceRecreateNotSupported(Invalid):
message = _('Instance recreate is not implemented by this virt driver.')
@@ -144,7 +144,22 @@ class BaseTestCase(test.TestCase):
self.stubs.Set(self.compute, 'scheduler_rpcapi', fake_rpcapi)
fake_network.set_stub_network_methods(self.stubs)

def fake_get_nw_info(cls, ctxt, instance, *args, **kwargs):
self.assertTrue(ctxt.is_admin)
return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
spectacular=True)

self.stubs.Set(network_api.API, 'get_instance_nw_info',
fake_get_nw_info)
self.stubs.Set(network_api.API, 'allocate_for_instance',
fake_get_nw_info)
self.compute_api = compute.API()

# Just to make long lines short
self.rt = self.compute._get_resource_tracker(NODENAME)

def tearDown(self):
timeutils.clear_time_override()
ctxt = context.get_admin_context()
fake_image.FakeImageService_reset()
instances = db.instance_get_all(ctxt)
@@ -212,25 +227,6 @@ class BaseTestCase(test.TestCase):


class ComputeTestCase(BaseTestCase):
def setUp(self):
def fake_get_nw_info(cls, ctxt, instance, *args, **kwargs):
self.assertTrue(ctxt.is_admin)
return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
spectacular=True)

super(ComputeTestCase, self).setUp()
self.stubs.Set(network_api.API, 'get_instance_nw_info',
fake_get_nw_info)
self.stubs.Set(network_api.API, 'allocate_for_instance',
fake_get_nw_info)
self.compute_api = compute.API()
# Just to make long lines short
self.rt = self.compute._get_resource_tracker(NODENAME)

def tearDown(self):
super(ComputeTestCase, self).tearDown()
timeutils.clear_time_override()

def test_wrap_instance_fault(self):
inst = {"uuid": "fake_uuid"}
@@ -1570,7 +1566,7 @@ class ComputeTestCase(BaseTestCase):
# Ensure failure when running an instance that already exists.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
self.assertRaises(exception.Invalid,
self.assertRaises(exception.InstanceExists,
self.compute.run_instance,
self.context,
instance=instance)
@@ -3397,255 +3393,6 @@ class ComputeTestCase(BaseTestCase):
result = self.compute._get_instances_on_driver(fake_context)
self.assertEqual(driver_instances, result)

def test_rebuild_on_host_updated_target(self):
"""Confirm evacuate scenario updates host."""

# creating testdata
c = self.context.elevated()

inst_ref = self._create_fake_instance({'host': 'someotherhost'})
db.instance_update(self.context, inst_ref['uuid'],
{"task_state": task_states.REBUILDING})
inst_id = inst_ref["id"]
inst_uuid = inst_ref["uuid"]
dest = self.compute.host

def set_shared_storage(instance):
return True

self.stubs.Set(self.compute.driver, 'instance_on_disk',
set_shared_storage)

self.compute.rebuild_instance(c, instance=inst_ref,
injected_files=None, image_ref=None,
orig_image_ref=None, new_pass=None,
orig_sys_metadata=None, bdms=[],
recreate=True, on_shared_storage=True)

# make sure instance is updated with destination hostname.
instance = db.instance_get(c, inst_id)
self.assertTrue(instance['host'])
self.assertEqual(instance['host'], dest)

# cleanup
db.instance_destroy(c, inst_uuid)
def test_rebuild_with_wrong_shared_storage(self):
"""Confirm evacuate scenario does not update host."""

# creating testdata
c = self.context.elevated()

inst_ref = self._create_fake_instance({'host': 'srchost'})
db.instance_update(self.context, inst_ref['uuid'],
{"task_state": task_states.REBUILDING})
inst_id = inst_ref["id"]
inst_uuid = inst_ref["uuid"]
dest = self.compute.host

def set_shared_storage(instance):
return True

self.stubs.Set(self.compute.driver, 'instance_on_disk',
set_shared_storage)

self.assertRaises(exception.Invalid,
self.compute.rebuild_instance, c, instance=inst_ref,
injected_files=None, image_ref=None,
orig_image_ref=None, new_pass=None,
orig_sys_metadata=None,
recreate=True, on_shared_storage=False)

# make sure instance was not updated with destination hostname.
instance = db.instance_get(c, inst_id)
self.assertTrue(instance['host'])
self.assertEqual(instance['host'], 'srchost')

# cleanup
db.instance_destroy(c, inst_uuid)
def test_rebuild_on_host_with_volumes(self):
"""Confirm evacuate scenario reconnects volumes."""

# creating testdata
inst_ref = jsonutils.to_primitive(self._create_fake_instance
({'host': 'fake_host_2'}))
db.instance_update(self.context, inst_ref['uuid'],
{"task_state": task_states.REBUILDING})

inst_id = inst_ref["id"]
inst_uuid = inst_ref["uuid"]

volume_id = 'fake'
values = {'instance_uuid': inst_ref['uuid'],
'device_name': '/dev/vdc',
'delete_on_termination': False,
'volume_id': volume_id,
}

admin = context.get_admin_context()
db.block_device_mapping_create(admin, values)

def set_shared_storage(instance):
return True

self.stubs.Set(self.compute.driver, 'instance_on_disk',
set_shared_storage)

def fake_volume_get(self, context, volume):
return {'id': volume_id}
self.stubs.Set(cinder.API, "get", fake_volume_get)

# Stub out and record whether it gets detached
result = {"detached": False}

def fake_detach(self, context, volume):
result["detached"] = volume["id"] == volume_id
self.stubs.Set(cinder.API, "detach", fake_detach)

def fake_terminate_connection(self, context, volume, connector):
return {}
self.stubs.Set(cinder.API, "terminate_connection",
fake_terminate_connection)

# make sure volumes attach, detach are called
self.mox.StubOutWithMock(self.compute.volume_api, 'detach')
self.compute.volume_api.detach(mox.IsA(admin), mox.IgnoreArg())

self.mox.StubOutWithMock(self.compute, '_setup_block_device_mapping')
self.compute._setup_block_device_mapping(mox.IsA(admin),
mox.IsA(inst_ref),
mox.IgnoreArg())

# start test
self.mox.ReplayAll()

self.compute.rebuild_instance(admin, instance=inst_ref,
injected_files=None, image_ref=None,
orig_image_ref=None, new_pass=None,
orig_sys_metadata=None, bdms=[],
recreate=True, on_shared_storage=True)

# cleanup
for bdms in db.block_device_mapping_get_all_by_instance(
admin, inst_uuid):
db.block_device_mapping_destroy(admin, bdms['id'])
db.instance_destroy(admin, inst_uuid)
def test_rebuild_on_host_with_shared_storage(self):
"""Confirm evacuate scenario on shared storage."""

# creating testdata
c = self.context.elevated()

inst_ref = jsonutils.to_primitive(self._create_fake_instance
({'host': 'fake_host_2'}))

inst_uuid = inst_ref["uuid"]
dest = self.compute.host

def set_shared_storage(instance):
return True

self.stubs.Set(self.compute.driver, 'instance_on_disk',
set_shared_storage)

self.mox.StubOutWithMock(self.compute.driver,
'spawn')
self.compute.driver.spawn(mox.IsA(c), mox.IsA(inst_ref), {},
mox.IgnoreArg(), None,
mox.IgnoreArg(), mox.IgnoreArg())

# start test
self.mox.ReplayAll()
db.instance_update(self.context, inst_ref['uuid'],
{"task_state": task_states.REBUILDING})

self.compute.rebuild_instance(c, instance=inst_ref,
injected_files=None, image_ref=None,
orig_image_ref=None, new_pass=None,
orig_sys_metadata=None, bdms=[],
recreate=True, on_shared_storage=True)

# cleanup
db.instance_destroy(c, inst_uuid)
def test_rebuild_on_host_without_shared_storage(self):
"""Confirm evacuate scenario without shared storage
(rebuild from image)"""

# creating testdata
c = self.context.elevated()

inst_ref = jsonutils.to_primitive(self._create_fake_instance
({'host': 'fake_host_2'}))

inst_uuid = inst_ref["uuid"]
dest = self.compute.host

fake_image = {
'id': 1,
'name': 'fake_name',
'properties': {'kernel_id': 'fake_kernel_id',
'ramdisk_id': 'fake_ramdisk_id'},
}

def set_shared_storage(instance):
return False

self.stubs.Set(self.compute.driver, 'instance_on_disk',
set_shared_storage)

self.mox.StubOutWithMock(self.compute.driver,
'spawn')
self.compute.driver.spawn(mox.IsA(c), mox.IsA(inst_ref),
mox.IsA(fake_image), mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg())

# start test
self.mox.ReplayAll()

db.instance_update(self.context, inst_ref['uuid'],
{"task_state": task_states.REBUILDING})

self.compute.rebuild_instance(c, instance=inst_ref,
injected_files=None, image_ref=None,
orig_image_ref=None, new_pass='newpass',
orig_sys_metadata=None, bdms=[],
recreate=True, on_shared_storage=False)

# cleanup
db.instance_destroy(c, inst_uuid)
def test_rebuild_on_host_instance_exists(self):
"""Rebuild if instance exists raise an exception."""

# creating testdata
c = self.context.elevated()
inst_ref = self._create_fake_instance({'host': 'fake_host_2'})
dest = self.compute.host

instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
dest = self.compute.host

self.compute.run_instance(self.context, instance=instance)

db.instance_update(self.context, inst_ref['uuid'],
{"task_state": task_states.REBUILDING})

self.assertRaises(exception.Invalid,
self.compute.rebuild_instance, c, instance=inst_ref,
injected_files=None, image_ref=None,
orig_image_ref=None, new_pass=None,
orig_sys_metadata=None,
recreate=True, on_shared_storage=True)

# cleanup
db.instance_destroy(c, inst_ref['uuid'])
self.compute.terminate_instance(self.context, instance=instance)


class ComputeAPITestCase(BaseTestCase):
@@ -6996,3 +6743,144 @@ class ComputeInactiveImageTestCase(BaseTestCase):
self.assertRaises(exception.ImageNotActive,
self.compute_api.create,
self.context, inst_type, 'fake-image-uuid')


class EvacuateHostTestCase(BaseTestCase):
def setUp(self):
super(EvacuateHostTestCase, self).setUp()
self.inst_ref = jsonutils.to_primitive(self._create_fake_instance
({'host': 'fake_host_2'}))
db.instance_update(self.context, self.inst_ref['uuid'],
{"task_state": task_states.REBUILDING})

def tearDown(self):
db.instance_destroy(self.context, self.inst_ref['uuid'])
super(EvacuateHostTestCase, self).tearDown()

def _rebuild(self, on_shared_storage=True):
orig_image_ref = None
image_ref = None
injected_files = None
self.compute.rebuild_instance(
self.context, self.inst_ref, orig_image_ref, image_ref,
injected_files, 'newpass', recreate=True,
on_shared_storage=on_shared_storage)
def test_rebuild_on_host_updated_target(self):
"""Confirm evacuate scenario updates host."""
self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
self.mox.ReplayAll()

self._rebuild()

# Should be on destination host
instance = db.instance_get(self.context, self.inst_ref['id'])
self.assertEqual(instance['host'], self.compute.host)

def test_rebuild_with_wrong_shared_storage(self):
"""Confirm evacuate scenario does not update host."""
self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
self.mox.ReplayAll()

self.assertRaises(exception.InvalidSharedStorage,
lambda: self._rebuild(on_shared_storage=False))

# Should remain on original host
instance = db.instance_get(self.context, self.inst_ref['id'])
self.assertEqual(instance['host'], 'fake_host_2')
def test_rebuild_on_host_with_volumes(self):
"""Confirm evacuate scenario reconnects volumes."""
values = {'instance_uuid': self.inst_ref['uuid'],
'device_name': '/dev/vdc',
'delete_on_termination': False,
'volume_id': 'fake_volume_id'}

db.block_device_mapping_create(self.context, values)

def fake_volume_get(self, context, volume):
return {'id': 'fake_volume_id'}
self.stubs.Set(cinder.API, "get", fake_volume_get)

# Stub out and record whether it gets detached
result = {"detached": False}

def fake_detach(self, context, volume):
result["detached"] = volume["id"] == 'fake_volume_id'
self.stubs.Set(cinder.API, "detach", fake_detach)

def fake_terminate_connection(self, context, volume, connector):
return {}
self.stubs.Set(cinder.API, "terminate_connection",
fake_terminate_connection)

# make sure volumes attach, detach are called
self.mox.StubOutWithMock(self.compute.volume_api, 'detach')
self.compute.volume_api.detach(mox.IsA(self.context), mox.IgnoreArg())

self.mox.StubOutWithMock(self.compute, '_setup_block_device_mapping')
self.compute._setup_block_device_mapping(mox.IsA(self.context),
mox.IsA(self.inst_ref),
mox.IgnoreArg())

self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
self.mox.ReplayAll()

self._rebuild()

# cleanup
for bdms in db.block_device_mapping_get_all_by_instance(
self.context, self.inst_ref['uuid']):
db.block_device_mapping_destroy(self.context, bdms['id'])
def test_rebuild_on_host_with_shared_storage(self):
"""Confirm evacuate scenario on shared storage."""
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.compute.driver.spawn(mox.IsA(self.context),
mox.IsA(self.inst_ref), {}, mox.IgnoreArg(), 'newpass',
network_info=mox.IgnoreArg(),
block_device_info=mox.IgnoreArg())

self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
self.mox.ReplayAll()

self._rebuild()
def test_rebuild_on_host_without_shared_storage(self):
"""Confirm evacuate scenario without shared storage
(rebuild from image)
"""
fake_image = {'id': 1,
'name': 'fake_name',
'properties': {'kernel_id': 'fake_kernel_id',
'ramdisk_id': 'fake_ramdisk_id'}}

self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.compute.driver.spawn(mox.IsA(self.context),
mox.IsA(self.inst_ref), mox.IsA(fake_image), mox.IgnoreArg(),
mox.IsA('newpass'), network_info=mox.IgnoreArg(),
block_device_info=mox.IgnoreArg())

self.stubs.Set(self.compute.driver, 'instance_on_disk',
lambda x: False)
self.mox.ReplayAll()

self._rebuild(on_shared_storage=False)
def test_rebuild_on_host_instance_exists(self):
"""Rebuild if instance exists raises an exception."""
db.instance_update(self.context, self.inst_ref['uuid'],
{"task_state": task_states.SCHEDULING})
self.compute.run_instance(self.context, instance=self.inst_ref)

self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True)
self.assertRaises(exception.InstanceExists,
lambda: self._rebuild(on_shared_storage=True))
def test_driver_doesnt_support_recreate(self):
with utils.temporary_mutation(self.compute.driver.capabilities,
supports_recreate=False):
self.stubs.Set(self.compute.driver, 'instance_on_disk',
lambda x: True)
self.assertRaises(exception.InstanceRecreateNotSupported,
lambda: self._rebuild(on_shared_storage=True))
@@ -1087,21 +1087,42 @@ def temporary_mutation(obj, **kwargs):
with temporary_mutation(context, read_deleted="yes"):
do_something_that_needed_deleted_objects()
"""
def is_dict_like(thing):
return hasattr(thing, 'has_key')

def get(thing, attr, default):
if is_dict_like(thing):
return thing.get(attr, default)
else:
return getattr(thing, attr, default)

def set_value(thing, attr, val):
if is_dict_like(thing):
thing[attr] = val
else:
setattr(thing, attr, val)

def delete(thing, attr):
if is_dict_like(thing):
del thing[attr]
else:
delattr(thing, attr)

NOT_PRESENT = object()

old_values = {}
for attr, new_value in kwargs.items():
old_values[attr] = getattr(obj, attr, NOT_PRESENT)
setattr(obj, attr, new_value)
old_values[attr] = get(obj, attr, NOT_PRESENT)
set_value(obj, attr, new_value)

try:
yield
finally:
for attr, old_value in old_values.items():
if old_value is NOT_PRESENT:
del obj[attr]
delete(obj, attr)
else:
setattr(obj, attr, old_value)
set_value(obj, attr, old_value)


def generate_mac_address():
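For reference, a minimal usage sketch of the extended temporary_mutation helper with a dict, mirroring how the new test drives the driver capabilities dict (it assumes the helper is importable as nova.utils.temporary_mutation, as the tests' utils.temporary_mutation call suggests; the names below are illustrative, not part of the commit):

    from nova import utils

    caps = {'supports_recreate': True}
    with utils.temporary_mutation(caps, supports_recreate=False):
        # inside the block the dict entry is overridden
        assert caps['supports_recreate'] is False
    # on exit the original value is restored
    assert caps['supports_recreate'] is True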