Change RPC for select_destinations()

This changes the RPC call for select_destinations() as made by the
conductor. The previous patch added the logic on the scheduler side;
this patch updates the conductor side to pass the two new parameters
that enable the new behaviors: returning Selection objects and
returning alternate hosts.

Blueprint: return-alternate-hosts

Change-Id: I70b11dd489d222be3d70733355bfe7966df556aa
Ed Leafe 2017-10-27 19:48:11 +00:00 committed by Matt Riedemann
parent 32c8ac6b7d
commit ca716ce4dd
11 changed files with 219 additions and 220 deletions
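
As an orientation for the diff below, here is a minimal, self-contained
sketch of the calling-convention change this series introduces. The classes
are hypothetical stand-ins (not nova.objects.Selection or the real scheduler
client); only the flag names and the nested return shape come from the patch
itself.

```python
# Hypothetical stand-ins to illustrate the conductor-side change; this is
# not Nova code. Only the parameter names and the list-of-lists return
# shape are taken from the actual patch.

class FakeSelection(object):
    """Simplified stand-in for nova.objects.Selection."""
    def __init__(self, service_host, nodename, limits=None):
        self.service_host = service_host
        self.nodename = nodename
        self.limits = limits


class FakeSchedulerClient(object):
    def select_destinations(self, ctxt, request_spec, instance_uuids,
                            return_objects=False, return_alternates=False):
        if not return_objects:
            # Old behavior: a flat list of host-state dicts, one per instance.
            return [{'host': 'host1', 'nodename': 'node1', 'limits': {}}]
        # New behavior: one list per instance; the first Selection in each
        # list is the chosen host, the rest (when return_alternates=True)
        # are alternates for retries.
        return [[FakeSelection('host1', 'node1')]]


client = FakeSchedulerClient()
host_lists = client.select_destinations(None, None, ['fake-uuid'],
                                         return_objects=True,
                                         return_alternates=True)
selection = host_lists[0][0]  # chosen host for the first (only) instance
print(selection.service_host, selection.nodename)
```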


@ -796,6 +796,9 @@ class ComputeAPI(object):
recreate=False, on_shared_storage=False, host=None, node=None,
preserve_ephemeral=False, migration=None, limits=None,
kwargs=None):
# NOTE(edleafe): compute nodes can only use the dict form of limits.
if isinstance(limits, objects.SchedulerLimits):
limits = limits.to_dict()
# NOTE(danms): kwargs is only here for cells compatibility, don't
# actually send it to compute
extra = {'preserve_ephemeral': preserve_ephemeral,
@ -1114,7 +1117,9 @@ class ComputeAPI(object):
filter_properties, admin_password=None, injected_files=None,
requested_networks=None, security_groups=None,
block_device_mapping=None, node=None, limits=None):
# NOTE(edleafe): compute nodes can only use the dict form of limits.
if isinstance(limits, objects.SchedulerLimits):
limits = limits.to_dict()
version = '4.0'
cctxt = self.router.client(ctxt).prepare(
server=host, version=version)
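
Both RPC methods above guard the limits argument with the same conversion,
since older compute nodes only understand the dict form. A standalone sketch
of that pattern, using a hypothetical stand-in for SchedulerLimits:

```python
# Sketch of the limits normalization guard; SchedulerLimitsStandIn is a
# hypothetical substitute for nova.objects.SchedulerLimits.

class SchedulerLimitsStandIn(object):
    def __init__(self, **values):
        self._values = values

    def to_dict(self):
        return dict(self._values)


def normalize_limits(limits):
    """Older compute nodes can only use the dict form of limits."""
    if isinstance(limits, SchedulerLimitsStandIn):
        return limits.to_dict()
    return limits


print(normalize_limits({'memory_mb': 1024}))                     # passed through
print(normalize_limits(SchedulerLimitsStandIn(memory_mb=1024)))  # converted
```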


@ -496,7 +496,7 @@ class ComputeTaskManager(base.Base):
else:
try:
host_mapping = objects.HostMapping.get_by_host(context,
host['host'])
host.service_host)
except exception.HostMappingNotFound:
# NOTE(alaski): For now this exception means that a
# deployment has not migrated to cellsv2 and we should
@ -545,8 +545,8 @@ class ComputeTaskManager(base.Base):
instance_uuids = [instance.uuid for instance in instances]
spec_obj = objects.RequestSpec.from_primitives(
context, request_spec, filter_properties)
hosts = self._schedule_instances(
context, spec_obj, instance_uuids)
host_lists = self._schedule_instances(context, spec_obj,
instance_uuids, return_alternates=True)
except Exception as exc:
updates = {'vm_state': vm_states.ERROR, 'task_state': None}
for instance in instances:
@ -563,10 +563,11 @@ class ComputeTaskManager(base.Base):
context, instance, requested_networks)
return
for (instance, host) in six.moves.zip(instances, hosts):
for (instance, host_list) in six.moves.zip(instances, host_lists):
host = host_list[0]
instance.availability_zone = (
availability_zones.get_host_availability_zone(context,
host['host']))
host.service_host))
try:
# NOTE(danms): This saves the az change above, refreshes our
# instance, and tells us if it has been deleted underneath us
@ -605,22 +606,23 @@ class ComputeTaskManager(base.Base):
return
self.compute_rpcapi.build_and_run_instance(context,
instance=instance, host=host['host'], image=image,
instance=instance, host=host.service_host, image=image,
request_spec=request_spec,
filter_properties=local_filter_props,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=bdms, node=host['nodename'],
limits=host['limits'])
block_device_mapping=bdms, node=host.nodename,
limits=host.limits)
def _schedule_instances(self, context, request_spec,
instance_uuids=None):
instance_uuids=None, return_alternates=False):
scheduler_utils.setup_instance_group(context, request_spec)
hosts = self.scheduler_client.select_destinations(context,
request_spec, instance_uuids)
return hosts
host_lists = self.scheduler_client.select_destinations(context,
request_spec, instance_uuids, return_objects=True,
return_alternates=return_alternates)
return host_lists
@targets_cell
def unshelve_instance(self, context, instance, request_spec=None):
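
The _schedule_instances() helper above now returns a list of Selection lists
rather than a flat list of host dicts, and callers such as build_instances()
pair each instance with its list and take the first (claimed) entry. A
compressed, hedged sketch of that consumption pattern with illustrative
stand-ins:

```python
# Illustrative only: a bare-bones stand-in for Selection, paired with
# instances the way the conductor now zips them together.
from collections import namedtuple

Selection = namedtuple('Selection', ['service_host', 'nodename'])

instances = ['instance-a', 'instance-b']
host_lists = [[Selection('host1', 'node1')],
              [Selection('host2', 'node2')]]

for instance, host_list in zip(instances, host_lists):
    # The first Selection in each list is the host whose resources were
    # claimed; any remaining entries would be alternates for retries.
    selection = host_list[0]
    print(instance, '->', selection.service_host, selection.nodename)
```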
@ -701,12 +703,14 @@ class ComputeTaskManager(base.Base):
objects.Destination(
cell=instance_mapping.cell_mapping))
hosts = self._schedule_instances(context, request_spec,
[instance.uuid])
host_state = hosts[0]
host_lists = self._schedule_instances(context,
request_spec, [instance.uuid],
return_alternates=False)
host_list = host_lists[0]
selection = host_list[0]
scheduler_utils.populate_filter_properties(
filter_properties, host_state)
(host, node) = (host_state['host'], host_state['nodename'])
filter_properties, selection)
(host, node) = (selection.service_host, selection.nodename)
instance.availability_zone = (
availability_zones.get_host_availability_zone(
context, host))
@ -847,12 +851,13 @@ class ComputeTaskManager(base.Base):
# is not forced to be the original host
request_spec.reset_forced_destinations()
try:
hosts = self._schedule_instances(context, request_spec,
[instance.uuid])
host_dict = hosts.pop(0)
host, node, limits = (host_dict['host'],
host_dict['nodename'],
host_dict['limits'])
host_lists = self._schedule_instances(context,
request_spec, [instance.uuid],
return_alternates=False)
host_list = host_lists[0]
selection = host_list[0]
host, node, limits = (selection.service_host,
selection.nodename, selection.limits)
except exception.NoValidHost as ex:
if migration:
migration.status = 'error'
@ -1008,8 +1013,8 @@ class ComputeTaskManager(base.Base):
# Add all the UUIDs for the instances
instance_uuids = [spec.instance_uuid for spec in request_specs]
try:
hosts = self._schedule_instances(context, request_specs[0],
instance_uuids)
host_lists = self._schedule_instances(context, request_specs[0],
instance_uuids, return_alternates=True)
except Exception as exc:
LOG.exception('Failed to schedule instances')
self._bury_in_cell0(context, request_specs[0], exc,
@ -1020,19 +1025,22 @@ class ComputeTaskManager(base.Base):
cell_mapping_cache = {}
instances = []
for (build_request, request_spec, host) in six.moves.zip(
build_requests, request_specs, hosts):
for (build_request, request_spec, host_list) in six.moves.zip(
build_requests, request_specs, host_lists):
instance = build_request.get_new_instance(context)
# host_list is a list of one or more Selection objects, the first
# of which has been selected and its resources claimed.
host = host_list[0]
# Convert host from the scheduler into a cell record
if host['host'] not in host_mapping_cache:
if host.service_host not in host_mapping_cache:
try:
host_mapping = objects.HostMapping.get_by_host(
context, host['host'])
host_mapping_cache[host['host']] = host_mapping
context, host.service_host)
host_mapping_cache[host.service_host] = host_mapping
except exception.HostMappingNotFound as exc:
LOG.error('No host-to-cell mapping found for selected '
'host %(host)s. Setup is incomplete.',
{'host': host['host']})
{'host': host.service_host})
self._bury_in_cell0(context, request_spec, exc,
build_requests=[build_request],
instances=[instance])
@ -1040,7 +1048,7 @@ class ComputeTaskManager(base.Base):
instances.append(None)
continue
else:
host_mapping = host_mapping_cache[host['host']]
host_mapping = host_mapping_cache[host.service_host]
cell = host_mapping.cell_mapping
@ -1062,7 +1070,7 @@ class ComputeTaskManager(base.Base):
else:
instance.availability_zone = (
availability_zones.get_host_availability_zone(
context, host['host']))
context, host.service_host))
with obj_target_cell(instance, cell):
instance.create()
instances.append(instance)
@ -1085,13 +1093,17 @@ class ComputeTaskManager(base.Base):
request_specs,
cell_mapping_cache)
for (build_request, request_spec, host, instance) in six.moves.zip(
build_requests, request_specs, hosts, instances):
zipped = six.moves.zip(build_requests, request_specs, host_lists,
instances)
for (build_request, request_spec, host_list, instance) in zipped:
if instance is None:
# Skip placeholders that were buried in cell0 or had their
# build requests deleted by the user before instance create.
continue
cell = cell_mapping_cache[instance.uuid]
# host_list is a list of one or more Selection objects, the first
# of which has been selected and its resources claimed.
host = host_list[0]
filter_props = request_spec.to_legacy_filter_properties_dict()
scheduler_utils.populate_retry(filter_props, instance.uuid)
scheduler_utils.populate_filter_properties(filter_props,
@ -1138,7 +1150,6 @@ class ComputeTaskManager(base.Base):
# pass the objects.
legacy_secgroups = [s.identifier
for s in request_spec.security_groups]
with obj_target_cell(instance, cell) as cctxt:
self.compute_rpcapi.build_and_run_instance(
cctxt, instance=instance, image=image,
@ -1149,8 +1160,8 @@ class ComputeTaskManager(base.Base):
requested_networks=requested_networks,
security_groups=legacy_secgroups,
block_device_mapping=instance_bdms,
host=host['host'], node=host['nodename'],
limits=host['limits'])
host=host.service_host, node=host.nodename,
limits=host.limits)
def _cleanup_build_artifacts(self, context, exc, instances, build_requests,
request_specs, cell_mapping_cache):


@ -313,9 +313,13 @@ class LiveMigrationTask(base.TaskBase):
self._check_not_over_max_retries(attempted_hosts)
request_spec.ignore_hosts = attempted_hosts
try:
hoststate = self.scheduler_client.select_destinations(
self.context, request_spec, [self.instance.uuid])[0]
host = hoststate['host']
selection_lists = self.scheduler_client.select_destinations(
self.context, request_spec, [self.instance.uuid],
return_objects=True, return_alternates=False)
# We only need the first item in the first list, as there is
# only one instance, and we don't care about any alternates.
selection = selection_lists[0][0]
host = selection.service_host
except messaging.RemoteError as ex:
# TODO(ShaoHe Feng) There maybe multi-scheduler, and the
# scheduling algorithm is R-R, we can let other scheduler try.
@ -334,9 +338,9 @@ class LiveMigrationTask(base.TaskBase):
# The scheduler would have created allocations against the
# selected destination host in Placement, so we need to remove
# those before moving on.
self._remove_host_allocations(host, hoststate['nodename'])
self._remove_host_allocations(host, selection.nodename)
host = None
return host, hoststate['nodename']
return selection.service_host, selection.nodename
def _remove_host_allocations(self, host, node):
"""Removes instance allocations against the given host from Placement


@ -239,17 +239,21 @@ class MigrationTask(base.TaskBase):
cell=instance_mapping.cell_mapping)
migration = self._preallocate_migration()
# For now, don't request alternates. A later patch in the series will
# modify migration to use alternates instead of calling the scheduler
# again.
selection_lists = self.scheduler_client.select_destinations(
self.context, self.request_spec, [self.instance.uuid],
return_objects=True, return_alternates=False)
# We only need the first item in the first list, as there is only one
# instance, and we don't care about any alternates.
selection = selection_lists[0][0]
hosts = self.scheduler_client.select_destinations(
self.context, self.request_spec, [self.instance.uuid])
host_state = hosts[0]
scheduler_utils.populate_filter_properties(legacy_props,
host_state)
scheduler_utils.populate_filter_properties(legacy_props, selection)
# context is not serializable
legacy_props.pop('context', None)
(host, node) = (host_state['host'], host_state['nodename'])
(host, node) = (selection.service_host, selection.nodename)
self.instance.availability_zone = (
availability_zones.get_host_availability_zone(


@ -212,8 +212,12 @@ class RequestSpec(base.NovaObject):
# Set the value anyway to avoid any call to obj_attr_is_set for it
self.instance_group = None
def _from_limits(self, limits_dict):
self.limits = SchedulerLimits.from_dict(limits_dict)
def _from_limits(self, limits):
if isinstance(limits, dict):
self.limits = SchedulerLimits.from_dict(limits)
else:
# Already a SchedulerLimits object.
self.limits = limits
def _from_hints(self, hints_dict):
if hints_dict is None:
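
The _from_limits() change above lets RequestSpec accept either the legacy
dict or an already-built SchedulerLimits object. A minimal sketch of the
dual-form handling, using a simplified stand-in class rather than the real
object:

```python
# Simplified stand-in for objects.SchedulerLimits; not the Nova class.

class FakeSchedulerLimits(object):
    def __init__(self, data):
        self.data = data

    @classmethod
    def from_dict(cls, limits_dict):
        return cls(dict(limits_dict))


def from_limits(limits):
    """Accept either the legacy dict form or an already-built object."""
    if isinstance(limits, dict):
        return FakeSchedulerLimits.from_dict(limits)
    # Already a limits object; use it as-is.
    return limits


print(from_limits({'memory_mb': 2048}).data)      # built from the dict form
obj = FakeSchedulerLimits({'memory_mb': 2048})
print(from_limits(obj) is obj)                    # object passed through
```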


@ -152,10 +152,6 @@ class SchedulerManager(manager.Manager):
allocation_request_version, return_alternates)
# If `return_objects` is False, we need to convert the selections to
# the older format, which is a list of host state dicts.
# NOTE(edleafe): since the RPC calling side is not yet updated in this
# patch, return_objects will always be False. This prevents sending the
# new Selection objects back until a later patch where the calling RPC
# will be changed.
if not return_objects:
selection_dicts = [sel[0].to_dict() for sel in selections]
return jsonutils.to_primitive(selection_dicts)
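
On the scheduler manager side (added by the previous patch), the
compatibility path remains for old callers: when return_objects is False,
only the first Selection per instance is kept and converted back to the
legacy dict. A simplified sketch of that flattening, with a stand-in
to_dict() whose keys are assumed from the legacy host-state dicts replaced
elsewhere in this diff:

```python
# Stand-in with just enough of Selection for illustration; the to_dict()
# keys ('host', 'nodename', 'limits') are assumed from the legacy format.

class FakeSelection(object):
    def __init__(self, service_host, nodename, limits=None):
        self.service_host = service_host
        self.nodename = nodename
        self.limits = limits

    def to_dict(self):
        return {'host': self.service_host, 'nodename': self.nodename,
                'limits': self.limits or {}}


selections = [[FakeSelection('host1', 'node1')],
              [FakeSelection('host2', 'node2')]]

# When return_objects is False, only the chosen host per instance is kept,
# converted back to the old dict format for pre-upgrade callers.
selection_dicts = [sel[0].to_dict() for sel in selections]
print(selection_dicts)
```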


@ -497,9 +497,11 @@ def populate_filter_properties(filter_properties, selection):
else:
host = selection.service_host
nodename = selection.nodename
limits = selection.limits if "limits" in selection else {}
# 'limits' can also be None, so handle that as an empty dict
limits = limits or {}
# Need to convert SchedulerLimits object to older dict format.
if "limits" in selection and selection.limits is not None:
limits = selection.limits.to_dict()
else:
limits = {}
# Adds a retry entry for the selected compute host and node:
_add_retry_host(filter_properties, host, nodename)
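
The updated populate_filter_properties() converts a Selection's
SchedulerLimits back to the older dict format before adding the retry entry.
A hedged sketch of the resulting filter_properties shape (stand-in classes;
the function only mirrors the observable result, which matches the
expectations in the updated unit tests below, not the real code):

```python
# FakeLimits and FakeSelection are illustrative stand-ins, not Nova objects.

class FakeLimits(object):
    def to_dict(self):
        return {'memory_mb': 2048}   # illustrative value only


class FakeSelection(object):
    service_host = 'host1'
    nodename = 'node1'
    limits = FakeLimits()


def populate(filter_properties, selection):
    # Convert the SchedulerLimits-like object to the older dict format.
    limits = selection.limits.to_dict() if selection.limits is not None else {}
    filter_properties['limits'] = limits
    # Record the chosen host/node so a retry will avoid it.
    retry = filter_properties.setdefault('retry',
                                         {'num_attempts': 1, 'hosts': []})
    retry['hosts'].append([selection.service_host, selection.nodename])


props = {}
populate(props, FakeSelection())
print(props)
# {'limits': {'memory_mb': 2048},
#  'retry': {'num_attempts': 1, 'hosts': [['host1', 'node1']]}}
```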


@ -29,6 +29,12 @@ from nova.tests import uuidsentinel as uuids
from nova import utils
fake_selection1 = objects.Selection(service_host="host1", nodename="node1",
cell_uuid=uuids.cell)
fake_selection2 = objects.Selection(service_host="host2", nodename="node2",
cell_uuid=uuids.cell)
class LiveMigrationTaskTestCase(test.NoDBTestCase):
def setUp(self):
super(LiveMigrationTaskTestCase, self).setUp()
@ -333,8 +339,9 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
self.context, self.fake_spec)
self.fake_spec.reset_forced_destinations()
self.task.scheduler_client.select_destinations(
self.context, self.fake_spec, [self.instance.uuid]).AndReturn(
[{'host': 'host1', 'nodename': 'node1'}])
self.context, self.fake_spec, [self.instance.uuid],
return_objects=True, return_alternates=False).AndReturn(
[[fake_selection1]])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")
@ -366,14 +373,15 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
check_compat, call_livem_checks):
get_image.return_value = "image"
from_components.return_value = another_spec
select_dest.return_value = [{'host': 'host1', 'nodename': 'node1'}]
select_dest.return_value = [[fake_selection1]]
self.assertEqual(("host1", "node1"), task._find_destination())
get_image.assert_called_once_with(self.instance.system_metadata)
setup_ig.assert_called_once_with(self.context, another_spec)
select_dest.assert_called_once_with(self.context, another_spec,
[self.instance.uuid])
[self.instance.uuid], return_objects=True,
return_alternates=False)
# Make sure the request_spec was updated to include the cell
# mapping.
self.assertIsNotNone(another_spec.requested_destination.cell)
@ -393,8 +401,8 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
scheduler_utils.setup_instance_group(self.context, self.fake_spec)
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec, [self.instance.uuid]).AndReturn(
[{'host': 'host1', 'nodename': 'node1'}])
self.fake_spec, [self.instance.uuid], return_objects=True,
return_alternates=False).AndReturn([[fake_selection1]])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")
@ -414,14 +422,14 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
self.instance.system_metadata).AndReturn("image")
scheduler_utils.setup_instance_group(self.context, self.fake_spec)
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec, [self.instance.uuid]).AndReturn(
[{'host': 'host1', 'nodename': 'node1'}])
self.fake_spec, [self.instance.uuid], return_objects=True,
return_alternates=False).AndReturn([[fake_selection1]])
self.task._check_compatible_with_source_hypervisor("host1")\
.AndRaise(error)
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec, [self.instance.uuid]).AndReturn(
[{'host': 'host2', 'nodename': 'node2'}])
self.fake_spec, [self.instance.uuid], return_objects=True,
return_alternates=False).AndReturn([[fake_selection2]])
self.task._check_compatible_with_source_hypervisor("host2")
self.task._call_livem_checks_on_host("host2")
@ -454,15 +462,15 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
self.instance.system_metadata).AndReturn("image")
scheduler_utils.setup_instance_group(self.context, self.fake_spec)
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec, [self.instance.uuid]).AndReturn(
[{'host': 'host1', 'nodename': 'node1'}])
self.fake_spec, [self.instance.uuid], return_objects=True,
return_alternates=False).AndReturn([[fake_selection1]])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")\
.AndRaise(exception.Invalid)
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec, [self.instance.uuid]).AndReturn(
[{'host': 'host2', 'nodename': 'node2'}])
self.fake_spec, [self.instance.uuid], return_objects=True,
return_alternates=False).AndReturn([[fake_selection2]])
self.task._check_compatible_with_source_hypervisor("host2")
self.task._call_livem_checks_on_host("host2")
@ -487,15 +495,15 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
self.instance.system_metadata).AndReturn("image")
scheduler_utils.setup_instance_group(self.context, self.fake_spec)
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec, [self.instance.uuid]).AndReturn(
[{'host': 'host1', 'nodename': 'node1'}])
self.fake_spec, [self.instance.uuid], return_objects=True,
return_alternates=False).AndReturn([[fake_selection1]])
self.task._check_compatible_with_source_hypervisor("host1")
self.task._call_livem_checks_on_host("host1")\
.AndRaise(exception.MigrationPreCheckError("reason"))
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec, [self.instance.uuid]).AndReturn(
[{'host': 'host2', 'nodename': 'node2'}])
self.fake_spec, [self.instance.uuid], return_objects=True,
return_alternates=False).AndReturn([[fake_selection2]])
self.task._check_compatible_with_source_hypervisor("host2")
self.task._call_livem_checks_on_host("host2")
@ -519,8 +527,8 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
self.instance.system_metadata).AndReturn("image")
scheduler_utils.setup_instance_group(self.context, self.fake_spec)
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec, [self.instance.uuid]).AndReturn(
[{'host': 'host1', 'nodename': 'node1'}])
self.fake_spec, [self.instance.uuid], return_objects=True,
return_alternates=False).AndReturn([[fake_selection1]])
self.task._check_compatible_with_source_hypervisor("host1")\
.AndRaise(exception.DestinationHypervisorTooOld)
@ -547,8 +555,9 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
self.instance.system_metadata).AndReturn("image")
scheduler_utils.setup_instance_group(self.context, self.fake_spec)
self.task.scheduler_client.select_destinations(self.context,
self.fake_spec, [self.instance.uuid]).AndRaise(
exception.NoValidHost(reason=""))
self.fake_spec, [self.instance.uuid], return_objects=True,
return_alternates=False).AndRaise(
exception.NoValidHost(reason=""))
self.mox.ReplayAll()
self.assertRaises(exception.NoValidHost, self.task._find_destination)


@ -43,9 +43,10 @@ class MigrationTaskTestCase(test.NoDBTestCase):
self.instance = objects.Instance._from_db_object(
self.context, inst_object, inst, [])
self.request_spec = objects.RequestSpec(image=objects.ImageMeta())
self.hosts = [dict(host='host1', nodename=None, limits={})]
self.host_lists = [[objects.Selection(service_host="host1",
nodename="node1", cell_uuid=uuids.cell1)]]
self.filter_properties = {'limits': {}, 'retry': {'num_attempts': 1,
'hosts': [['host1', None]]}}
'hosts': [['host1', 'node1']]}}
self.reservations = []
self.clean_shutdown = True
@ -64,7 +65,7 @@ class MigrationTaskTestCase(test.NoDBTestCase):
def test_execute_legacy_no_pre_create_migration(self, prep_resize_mock,
sel_dest_mock, sig_mock,
az_mock, gmv_mock):
sel_dest_mock.return_value = self.hosts
sel_dest_mock.return_value = self.host_lists
az_mock.return_value = 'myaz'
task = self._generate_task()
legacy_request_spec = self.request_spec.to_legacy_request_spec_dict()
@ -73,13 +74,15 @@ class MigrationTaskTestCase(test.NoDBTestCase):
sig_mock.assert_called_once_with(self.context, self.request_spec)
task.scheduler_client.select_destinations.assert_called_once_with(
self.context, self.request_spec, [self.instance.uuid])
self.context, self.request_spec, [self.instance.uuid],
return_objects=True, return_alternates=False)
selection = self.host_lists[0][0]
prep_resize_mock.assert_called_once_with(
self.context, self.instance, legacy_request_spec['image'],
self.flavor, self.hosts[0]['host'], None, self.reservations,
self.flavor, selection.service_host, None, self.reservations,
request_spec=legacy_request_spec,
filter_properties=self.filter_properties,
node=self.hosts[0]['nodename'], clean_shutdown=self.clean_shutdown)
filter_properties=self.filter_properties, node=selection.nodename,
clean_shutdown=self.clean_shutdown)
az_mock.assert_called_once_with(self.context, 'host1')
self.assertIsNone(task._migration)
@ -95,7 +98,7 @@ class MigrationTaskTestCase(test.NoDBTestCase):
def _test_execute(self, prep_resize_mock, sel_dest_mock, sig_mock, az_mock,
gmv_mock, cm_mock, sm_mock, cn_mock, rc_mock,
requested_destination=False):
sel_dest_mock.return_value = self.hosts
sel_dest_mock.return_value = self.host_lists
az_mock.return_value = 'myaz'
if requested_destination:
@ -126,13 +129,15 @@ class MigrationTaskTestCase(test.NoDBTestCase):
sig_mock.assert_called_once_with(self.context, self.request_spec)
task.scheduler_client.select_destinations.assert_called_once_with(
self.context, self.request_spec, [self.instance.uuid])
self.context, self.request_spec, [self.instance.uuid],
return_objects=True, return_alternates=False)
selection = self.host_lists[0][0]
prep_resize_mock.assert_called_once_with(
self.context, self.instance, legacy_request_spec['image'],
self.flavor, self.hosts[0]['host'], task._migration,
self.flavor, selection.service_host, task._migration,
self.reservations, request_spec=legacy_request_spec,
filter_properties=self.filter_properties,
node=self.hosts[0]['nodename'], clean_shutdown=self.clean_shutdown)
filter_properties=self.filter_properties, node=selection.nodename,
clean_shutdown=self.clean_shutdown)
az_mock.assert_called_once_with(self.context, 'host1')
self.assertIsNotNone(task._migration)
@ -178,7 +183,7 @@ class MigrationTaskTestCase(test.NoDBTestCase):
def test_execute_rollback(self, prep_resize_mock, sel_dest_mock, sig_mock,
az_mock, gmv_mock, cm_mock, sm_mock, cn_mock,
rc_mock, mock_ra):
sel_dest_mock.return_value = self.hosts
sel_dest_mock.return_value = self.host_lists
az_mock.return_value = 'myaz'
task = self._generate_task()
gmv_mock.return_value = 23


@ -65,6 +65,12 @@ from nova import utils
CONF = conf.CONF
fake_selection1 = objects.Selection(service_host="host1", nodename="node1",
cell_uuid=uuids.cell, limits=None)
fake_selection2 = objects.Selection(service_host="host2", nodename="node2",
cell_uuid=uuids.cell, limits=None)
class FakeContext(context.RequestContext):
def elevated(self):
"""Return a consistent elevated context so we can detect it."""
@ -414,9 +420,8 @@ class _BaseTaskTestCase(object):
'num_instances': 2}
filter_properties = {'retry': {'num_attempts': 1, 'hosts': []}}
self.conductor_manager._schedule_instances(self.context,
fake_spec, [uuids.fake, uuids.fake]).AndReturn(
[{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}])
fake_spec, [uuids.fake, uuids.fake], return_alternates=True
).AndReturn([[fake_selection1], [fake_selection2]])
db.block_device_mapping_get_all_by_instance(self.context,
instances[0].uuid).AndReturn([])
self.conductor_manager.compute_rpcapi.build_and_run_instance(
@ -431,13 +436,13 @@ class _BaseTaskTestCase(object):
'num_instances': 2},
filter_properties={'retry': {'num_attempts': 1,
'hosts': [['host1', 'node1']]},
'limits': []},
'limits': {}},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping=mox.IgnoreArg(),
node='node1', limits=[])
node='node1', limits=None)
db.block_device_mapping_get_all_by_instance(self.context,
instances[1].uuid).AndReturn([])
self.conductor_manager.compute_rpcapi.build_and_run_instance(
@ -450,7 +455,7 @@ class _BaseTaskTestCase(object):
'instance_properties': instance_properties,
'instance_type': instance_type_p,
'num_instances': 2},
filter_properties={'limits': [],
filter_properties={'limits': {},
'retry': {'num_attempts': 1,
'hosts': [['host2', 'node2']]}},
admin_password='admin_password',
@ -458,7 +463,7 @@ class _BaseTaskTestCase(object):
requested_networks=None,
security_groups='security_groups',
block_device_mapping=mox.IgnoreArg(),
node='node2', limits=[])
node='node2', limits=None)
self.mox.ReplayAll()
# build_instances() is a cast, we need to wait for it to complete
@ -636,9 +641,7 @@ class _BaseTaskTestCase(object):
mock_select_dests, mock_get_by_host, mock_get_inst_map_by_uuid,
_mock_save, _mock_buildreq):
mock_select_dests.return_value = [
{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}]
mock_select_dests.return_value = [[fake_selection1], [fake_selection2]]
instances = [fake_instance.fake_instance_obj(self.context)
for i in range(2)]
@ -678,10 +681,7 @@ class _BaseTaskTestCase(object):
mock_select_dests, mock_get_by_host, mock_get_inst_map_by_uuid,
_mock_save, mock_buildreq):
mock_select_dests.return_value = [
{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}]
mock_select_dests.return_value = [[fake_selection1], [fake_selection2]]
num_instances = 2
instances = [fake_instance.fake_instance_obj(self.context)
for i in range(num_instances)]
@ -727,9 +727,7 @@ class _BaseTaskTestCase(object):
mock_select_dests, mock_get_by_host, mock_get_inst_map_by_uuid,
_mock_save, _mock_buildreq):
mock_select_dests.return_value = [
{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}]
mock_select_dests.return_value = [[fake_selection1], [fake_selection2]]
mock_get_by_host.side_effect = [
objects.HostMapping(cell_mapping=objects.CellMapping(id=1)),
objects.HostMapping(cell_mapping=objects.CellMapping(id=2))]
@ -778,10 +776,7 @@ class _BaseTaskTestCase(object):
def test_build_instances_destroy_build_request(self, mock_select_dests,
mock_build_req_get):
mock_select_dests.return_value = [
{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}]
mock_select_dests.return_value = [[fake_selection1], [fake_selection2]]
num_instances = 2
instances = [fake_instance.fake_instance_obj(self.context)
for i in range(num_instances)]
@ -826,9 +821,7 @@ class _BaseTaskTestCase(object):
# conductor_manager._destroy_build_request() should not cause the
# build to stop.
mock_select_dests.return_value = [
{'host': 'host1', 'nodename': 'node1', 'limits': []}]
mock_select_dests.return_value = [[fake_selection1]]
instance = fake_instance.fake_instance_obj(self.context)
image = {'fake-data': 'should_pass_silently'}
@ -864,14 +857,14 @@ class _BaseTaskTestCase(object):
request_spec=mock.ANY,
filter_properties={'retry': {'num_attempts': 2,
'hosts': [['host1', 'node1']]},
'limits': []},
'limits': {}},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping=test.MatchType(
objects.BlockDeviceMappingList),
node='node1', limits=[])
node='node1', limits=None)
mock_pop_inst_map.assert_not_called()
mock_destroy_build_req.assert_not_called()
@ -950,13 +943,13 @@ class _BaseTaskTestCase(object):
to_filtprops.return_value = filter_properties
to_reqspec.return_value = request_spec
from_primitives.return_value = fake_spec
sched_instances.return_value = [host]
sched_instances.return_value = [[fake_selection1]]
self.conductor.unshelve_instance(self.context, instance, fake_spec)
reset_forced_destinations.assert_called_once_with()
from_primitives.assert_called_once_with(self.context, request_spec,
filter_properties)
sched_instances.assert_called_once_with(self.context, fake_spec,
[instance.uuid])
[instance.uuid], return_alternates=False)
self.assertEqual(cell_mapping,
fake_spec.requested_destination.cell)
# NOTE(sbauza): Since the instance is dehydrated when passing
@ -1010,9 +1003,7 @@ class _BaseTaskTestCase(object):
mock.patch.object(objects.InstanceMapping,
'get_by_instance_uuid'),
) as (schedule_mock, unshelve_mock, get_by_instance_uuid):
schedule_mock.return_value = [{'host': 'fake_host',
'nodename': 'fake_node',
'limits': {}}]
schedule_mock.return_value = [[fake_selection1]]
get_by_instance_uuid.return_value = objects.InstanceMapping(
cell_mapping=objects.CellMapping.get_by_uuid(
self.context, uuids.cell1))
@ -1047,11 +1038,11 @@ class _BaseTaskTestCase(object):
'fake_image_id', show_deleted=False).AndReturn('fake_image')
scheduler_utils.build_request_spec('fake_image',
mox.IgnoreArg()).AndReturn('req_spec')
fake_selection = objects.Selection(service_host="fake_host",
nodename="fake_node", limits=None)
self.conductor_manager._schedule_instances(self.context,
fake_spec, [instance.uuid]).AndReturn(
[{'host': 'fake_host',
'nodename': 'fake_node',
'limits': {}}])
fake_spec, [instance.uuid], return_alternates=False).AndReturn(
[[fake_selection]])
self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
instance, 'fake_host', image='fake_image',
filter_properties={'limits': {},
@ -1074,7 +1065,8 @@ class _BaseTaskTestCase(object):
instance.save()
system_metadata = instance.system_metadata
def fake_schedule_instances(context, request_spec, *instances):
def fake_schedule_instances(context, request_spec, *instances,
**kwargs):
raise exc.NoValidHost(reason='')
with test.nested(
@ -1147,11 +1139,11 @@ class _BaseTaskTestCase(object):
scheduler_utils.build_request_spec(None,
mox.IgnoreArg()).AndReturn('req_spec')
fake_selection = objects.Selection(service_host="fake_host",
nodename="fake_node", limits=None)
self.conductor_manager._schedule_instances(self.context,
fake_spec, [instance.uuid]).AndReturn(
[{'host': 'fake_host',
'nodename': 'fake_node',
'limits': {}}])
fake_spec, [instance.uuid], return_alternates=False).AndReturn(
[[fake_selection]])
self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
instance, 'fake_host', image=None,
filter_properties={'limits': {},
@ -1190,7 +1182,9 @@ class _BaseTaskTestCase(object):
inst_obj.host = 'noselect'
expected_host = 'thebesthost'
expected_node = 'thebestnode'
expected_limits = 'fake-limits'
expected_limits = None
fake_selection = objects.Selection(service_host=expected_host,
nodename=expected_node, limits=None)
rebuild_args, compute_args = self._prepare_rebuild_args(
{'host': None, 'node': expected_node, 'limits': expected_limits})
request_spec = {}
@ -1206,9 +1200,7 @@ class _BaseTaskTestCase(object):
return_value=fake_spec),
mock.patch.object(self.conductor_manager.scheduler_client,
'select_destinations',
return_value=[{'host': expected_host,
'nodename': expected_node,
'limits': expected_limits}]),
return_value=[[fake_selection]]),
mock.patch('nova.scheduler.utils.build_request_spec',
return_value=request_spec)
) as (rebuild_mock, sig_mock, fp_mock, select_dest_mock, bs_mock):
@ -1220,7 +1212,7 @@ class _BaseTaskTestCase(object):
fp_mock.assert_called_once_with(self.context, request_spec,
filter_properties)
select_dest_mock.assert_called_once_with(self.context, fake_spec,
inst_uuids)
inst_uuids, return_objects=True, return_alternates=False)
compute_args['host'] = expected_host
rebuild_mock.assert_called_once_with(self.context,
instance=inst_obj,
@ -1256,7 +1248,8 @@ class _BaseTaskTestCase(object):
fp_mock.assert_called_once_with(self.context, request_spec,
filter_properties)
select_dest_mock.assert_called_once_with(self.context, fake_spec,
[inst_obj.uuid])
[inst_obj.uuid], return_objects=True,
return_alternates=False)
self.assertFalse(rebuild_mock.called)
@mock.patch.object(conductor_manager.compute_rpcapi.ComputeAPI,
@ -1341,7 +1334,9 @@ class _BaseTaskTestCase(object):
inst_obj.host = 'noselect'
expected_host = 'thebesthost'
expected_node = 'thebestnode'
expected_limits = 'fake-limits'
expected_limits = None
fake_selection = objects.Selection(service_host=expected_host,
nodename=expected_node, limits=None)
fake_spec = objects.RequestSpec(ignore_hosts=[])
rebuild_args, compute_args = self._prepare_rebuild_args(
{'host': None, 'node': expected_node, 'limits': expected_limits,
@ -1353,9 +1348,7 @@ class _BaseTaskTestCase(object):
return_value=False),
mock.patch.object(self.conductor_manager.scheduler_client,
'select_destinations',
return_value=[{'host': expected_host,
'nodename': expected_node,
'limits': expected_limits}]),
return_value=[[fake_selection]]),
mock.patch.object(fake_spec, 'reset_forced_destinations'),
) as (rebuild_mock, sig_mock, select_dest_mock, reset_fd):
self.conductor_manager.rebuild_instance(context=self.context,
@ -1366,7 +1359,8 @@ class _BaseTaskTestCase(object):
else:
reset_fd.assert_not_called()
select_dest_mock.assert_called_once_with(self.context,
fake_spec, [inst_obj.uuid])
fake_spec, [inst_obj.uuid], return_objects=True,
return_alternates=False)
compute_args['host'] = expected_host
rebuild_mock.assert_called_once_with(self.context,
instance=inst_obj,
@ -1429,9 +1423,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
select_destinations,
build_and_run_instance,
get_az):
select_destinations.return_value = [{'host': 'fake-host',
'nodename': 'fake-nodename',
'limits': None}]
select_destinations.return_value = [[fake_selection1]]
get_az.return_value = 'myaz'
details = {}
@ -1442,11 +1434,11 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
self.assertEqual(1, len(kwargs['block_device_mapping']))
# FIXME(danms): How to validate the db connection here?
self.start_service('compute', host='fake-host')
self.start_service('compute', host='host1')
build_and_run_instance.side_effect = _build_and_run_instance
self.conductor.schedule_and_build_instances(**params)
self.assertTrue(build_and_run_instance.called)
get_az.assert_called_once_with(mock.ANY, 'fake-host')
get_az.assert_called_once_with(mock.ANY, 'host1')
instance_uuid = details['instance'].uuid
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
@ -1524,23 +1516,12 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
build_and_run_instance):
# This list needs to match the number of build_requests and the number
# of request_specs in params.
select_destinations.return_value = [{'host': 'fake-host',
'nodename': 'fake-nodename',
'limits': None},
{'host': 'fake-host',
'nodename': 'fake-nodename',
'limits': None},
{'host': 'fake-host2',
'nodename': 'fake-nodename2',
'limits': None},
{'host': 'fake-host2',
'nodename': 'fake-nodename2',
'limits': None}]
select_destinations.return_value = [[fake_selection1],
[fake_selection2], [fake_selection1], [fake_selection2]]
params = self.params
self.start_service('compute', host='fake-host')
self.start_service('compute', host='fake-host2')
self.start_service('compute', host='host1')
self.start_service('compute', host='host2')
# Because of the cache, this should only be called twice,
# once for the first and once for the third request.
@ -1592,18 +1573,14 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""Test that creates two instances in separate cells."""
# This list needs to match the number of build_requests and the number
# of request_specs in params.
select_destinations.return_value = [{'host': 'fake-host',
'nodename': 'fake-nodename',
'limits': None},
{'host': 'fake-host2',
'nodename': 'fake-nodename2',
'limits': None}]
select_destinations.return_value = [[fake_selection1],
[fake_selection2]]
params = self.params
# The cells are created in the base TestCase setup.
self.start_service('compute', host='fake-host', cell='cell1')
self.start_service('compute', host='fake-host2', cell='cell2')
self.start_service('compute', host='host1', cell='cell1')
self.start_service('compute', host='host2', cell='cell2')
get_hostmapping.side_effect = self.host_mappings.values()
@ -1663,10 +1640,8 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
taglist_destroy):
br_destroy.side_effect = exc.BuildRequestNotFound(uuid='foo')
self.start_service('compute', host='fake-host')
select_destinations.return_value = [{'host': 'fake-host',
'nodename': 'nodesarestupid',
'limits': None}]
self.start_service('compute', host='host1')
select_destinations.return_value = [[fake_selection1]]
taglist_create.return_value = self.params['tags']
self.conductor.schedule_and_build_instances(**self.params)
self.assertFalse(build_and_run.called)
@ -1700,10 +1675,8 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
None,
]
self.start_service('compute', host='fake-host')
select_destinations.return_value = [{'host': 'fake-host',
'nodename': 'nodesarestupid',
'limits': None}]
self.start_service('compute', host='host1')
select_destinations.return_value = [[fake_selection1]]
self.conductor.schedule_and_build_instances(**self.params)
self.assertFalse(build_and_run.called)
self.assertFalse(bury.called)
@ -1730,10 +1703,8 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
None,
]
self.start_service('compute', host='fake-host')
select_destinations.return_value = [{'host': 'fake-host',
'nodename': 'nodesarestupid',
'limits': None}]
self.start_service('compute', host='host1')
select_destinations.return_value = [[fake_selection1]]
self.conductor.schedule_and_build_instances(**self.params)
self.assertFalse(build_and_run.called)
self.assertFalse(bury.called)
@ -1760,15 +1731,9 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
"""
inst_uuid = self.params['build_requests'][0].instance.uuid
br_get_by_inst.side_effect = exc.BuildRequestNotFound(uuid=inst_uuid)
self.start_service('compute', host='fake-host')
select_destinations.return_value = [{'host': 'fake-host',
'nodename': 'nodesarestupid',
'limits': None}]
with mock.patch.object(self.conductor.scheduler_client,
'reportclient') as mock_rc:
self.conductor.schedule_and_build_instances(**self.params)
mock_rc.delete_allocation_for_instance.assert_called_once_with(
inst_uuid)
self.start_service('compute', host='host1')
select_destinations.return_value = [[fake_selection1]]
self.conductor.schedule_and_build_instances(**self.params)
# we don't create the instance since the build request is gone
self.assertFalse(inst_create.called)
# we don't build the instance since we didn't create it
@ -1796,9 +1761,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
instances[0].uuid)
bury.side_effect = _fake_bury
select_dest.return_value = [{'host': 'missing-host',
'nodename': 'nodesarestupid',
'limits': None}]
select_dest.return_value = [[fake_selection1]]
self.conductor.schedule_and_build_instances(**self.params)
self.assertTrue(bury.called)
self.assertFalse(build_and_run.called)
@ -1807,9 +1770,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
@mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations')
def test_schedule_and_build_over_quota_during_recheck(self, mock_select,
mock_check):
mock_select.return_value = [{'host': 'fake-host',
'nodename': 'fake-nodename',
'limits': None}]
mock_select.return_value = [[fake_selection1]]
# Simulate a race where the first check passes and the recheck fails.
# First check occurs in compute/api.
fake_quotas = {'instances': 5, 'cores': 10, 'ram': 4096}
@ -1831,7 +1792,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
self.stub_out('nova.objects.Instance.save', fake_save)
# This is needed to register the compute node in a cell.
self.start_service('compute', host='fake-host')
self.start_service('compute', host='host1')
self.assertRaises(
exc.TooManyInstances,
self.conductor.schedule_and_build_instances, **self.params)
@ -1870,13 +1831,11 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
@mock.patch('nova.scheduler.rpcapi.SchedulerAPI.select_destinations')
def test_schedule_and_build_no_quota_recheck(self, mock_select,
mock_check, mock_build):
mock_select.return_value = [{'host': 'fake-host',
'nodename': 'fake-nodename',
'limits': None}]
mock_select.return_value = [[fake_selection1]]
# Disable recheck_quota.
self.flags(recheck_quota=False, group='quota')
# This is needed to register the compute node in a cell.
self.start_service('compute', host='fake-host')
self.start_service('compute', host='host1')
self.conductor.schedule_and_build_instances(**self.params)
# check_deltas should not have been called a second time. The first
@ -2302,10 +2261,10 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
cell_mapping=objects.CellMapping.get_by_uuid(self.context,
uuids.cell1))
hosts = [dict(host='host1', nodename=None, limits={})]
hosts = [dict(host='host1', nodename='node1', limits={})]
metadata_mock.return_value = image
exc_info = test.TestingException('something happened')
select_dest_mock.return_value = hosts
select_dest_mock.return_value = [[fake_selection1]]
updates = {'vm_state': vm_states.STOPPED,
'task_state': None}
@ -2317,13 +2276,13 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
# Filter properties are populated during code execution
legacy_filter_props = {'retry': {'num_attempts': 1,
'hosts': [['host1', None]]},
'hosts': [['host1', 'node1']]},
'limits': {}}
metadata_mock.assert_called_with({})
sig_mock.assert_called_once_with(self.context, fake_spec)
select_dest_mock.assert_called_once_with(
self.context, fake_spec, [inst_obj.uuid])
select_dest_mock.assert_called_once_with(self.context, fake_spec,
[inst_obj.uuid], return_objects=True, return_alternates=False)
prep_resize_mock.assert_called_once_with(
self.context, inst_obj, legacy_request_spec['image'],
flavor, hosts[0]['host'], None, [resvs],
@ -2425,16 +2384,15 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
filter_properties = {'retry': {'num_attempts': 1, 'hosts': []}}
inst_uuids = [inst.uuid for inst in instances]
self.conductor_manager._schedule_instances(self.context,
fake_spec, inst_uuids).AndReturn(
[{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}])
fake_spec, inst_uuids, return_alternates=True).AndReturn(
[[fake_selection1], [fake_selection2]])
instances[0].save().AndRaise(
exc.InstanceNotFound(instance_id=instances[0].uuid))
instances[1].save()
self.conductor_manager.compute_rpcapi.build_and_run_instance(
self.context, instance=instances[1], host='host2',
image={'fake-data': 'should_pass_silently'}, request_spec=spec,
filter_properties={'limits': [],
filter_properties={'limits': {},
'retry': {'num_attempts': 1,
'hosts': [['host2',
'node2']]}},
@ -2443,7 +2401,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
requested_networks=None,
security_groups='security_groups',
block_device_mapping=mox.IsA(objects.BlockDeviceMappingList),
node='node2', limits=[])
node='node2', limits=None)
self.mox.ReplayAll()
# build_instances() is a cast, we need to wait for it to complete
@ -2468,8 +2426,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
instances = [fake_instance.fake_instance_obj(self.context)
for i in range(2)]
image = {'fake-data': 'should_pass_silently'}
destinations = [{'host': 'host1', 'nodename': 'node1', 'limits': []},
{'host': 'host2', 'nodename': 'node2', 'limits': []}]
destinations = [[fake_selection1], [fake_selection2]]
spec = {'fake': 'specs',
'instance_properties': instances[0]}
build_request_spec.return_value = spec
@ -2510,7 +2467,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
build_and_run_instance.assert_called_once_with(self.context,
instance=instances[1], host='host2', image={'fake-data':
'should_pass_silently'}, request_spec=spec,
filter_properties={'limits': [],
filter_properties={'limits': {},
'retry': {'num_attempts': 1,
'hosts': [['host2',
'node2']]}},
@ -2519,7 +2476,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
requested_networks=None,
security_groups='security_groups',
block_device_mapping=mock.ANY,
node='node2', limits=[])
node='node2', limits=None)
def test_cleanup_allocated_networks_none_requested(self):
# Tests that we don't deallocate networks if 'none' were specifically


@ -194,6 +194,8 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
expected_limits = None
elif no_limits:
expected_limits = {}
elif isinstance(fake_limits, objects.SchedulerLimits):
expected_limits = fake_limits.to_dict()
else:
expected_limits = fake_limits
self.assertEqual(expected_limits,