Merge "Revert "Refine waiting for vif plug events during _hard_reboot""
commit 43c72f02f2
@@ -12899,28 +12899,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
         mock_hard_reboot.assert_called_once_with(self.context,
                                                  instance, [], None)
 
-    @mock.patch('nova.virt.libvirt.LibvirtDriver._get_neutron_events')
-    @mock.patch('nova.virt.libvirt.LibvirtDriver.plug_vifs')
-    @mock.patch('nova.virt.libvirt.LibvirtDriver._lxc_disk_handler')
-    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain')
-    def test_create_domain_and_network_reboot(self, mock_create, mock_handler,
-                                              mock_plug, mock_events):
-        # Verify that we call get_neutron_events with reboot=True if
-        # create_domain_and_network was called with reboot=True
-        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
-        instance = objects.Instance(**self.test_instance)
-        network_info = _fake_network_info(self, 1)
-
-        @mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering')
-        @mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter')
-        @mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')
-        def _do_create(mock_apply, mock_prepare, mock_setup):
-            drvr._create_domain_and_network(self.context, mock.sentinel.xml,
-                                            instance, network_info,
-                                            reboot=True)
-        _do_create()
-        mock_events.assert_called_once_with(network_info, reboot=True)
-
     @mock.patch('nova.virt.libvirt.LibvirtDriver.get_info')
     @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
     @mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_xml')
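The removed test above relies on stacked mock.patch decorators. As a reminder of the ordering rule it depends on, here is a small standalone sketch (hypothetical class and method names, not Nova code): the innermost (bottom-most) decorator supplies the first mock argument.

from unittest import mock

class Driver:
    def create_domain(self): pass
    def get_neutron_events(self): pass

@mock.patch.object(Driver, 'get_neutron_events')  # outermost -> last arg
@mock.patch.object(Driver, 'create_domain')       # innermost -> first arg
def check(mock_create, mock_events):
    drv = Driver()
    drv.create_domain()
    mock_create.assert_called_once()
    mock_events.assert_not_called()

check()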
@@ -12990,7 +12968,7 @@ class LibvirtConnTestCase(test.NoDBTestCase,
             block_device_info=block_device_info, mdevs=[uuids.mdev1])
         mock_create_domain_and_network.assert_called_once_with(self.context,
             dummyxml, instance, network_info,
-            block_device_info=block_device_info, reboot=True)
+            block_device_info=block_device_info, vifs_already_plugged=True)
 
     @mock.patch('oslo_utils.fileutils.ensure_tree')
     @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall')
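The only change in this hunk is the expected keyword: with the reboot parameter reverted away, the hard-reboot test must again expect vifs_already_plugged=True. A quick standalone reminder (hypothetical mock, not the Nova test) that assert_called_once_with matches keyword arguments exactly:

from unittest import mock

create = mock.Mock()
create('dummyxml', vifs_already_plugged=True)

# Passes: args and kwargs match the one recorded call exactly.
create.assert_called_once_with('dummyxml', vifs_already_plugged=True)

# Would raise AssertionError: reboot=True was never in the recorded call.
# create.assert_called_once_with('dummyxml', reboot=True)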
@@ -15911,19 +15889,6 @@ class LibvirtConnTestCase(test.NoDBTestCase,
         events = drvr._get_neutron_events(network_info)
         self.assertEqual([('network-vif-plugged', '1')], events)
 
-    def test_get_neutron_events_reboot(self):
-        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
-        bridge = network_model.VIF_TYPE_BRIDGE
-        ovs = network_model.VIF_TYPE_OVS
-        network_info = [network_model.VIF(id='1'),
-                        network_model.VIF(id='2', active=True),
-                        network_model.VIF(id='3', type=bridge),
-                        network_model.VIF(id='4', type=ovs)]
-        events = drvr._get_neutron_events(network_info, reboot=True)
-        self.assertEqual([('network-vif-plugged', '1'),
-                          ('network-vif-plugged', '2'),
-                          ('network-vif-plugged', '4')], events)
-
     def test_unplug_vifs_ignores_errors(self):
         drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
         with mock.patch.object(drvr, 'vif_driver') as vif_driver:
@@ -2805,9 +2805,14 @@ class LibvirtDriver(driver.ComputeDriver):
 
         # Initialize all the necessary networking, block devices and
         # start the instance.
-        self._create_domain_and_network(
-            context, xml, instance, network_info,
-            block_device_info=block_device_info, reboot=True)
+        # NOTE(melwitt): Pass vifs_already_plugged=True here even though we've
+        # unplugged vifs earlier. The behavior of neutron plug events depends
+        # on which vif type we're using and we are working with a stale network
+        # info cache here, so won't rely on waiting for neutron plug events.
+        # vifs_already_plugged=True means "do not wait for neutron plug events"
+        self._create_domain_and_network(context, xml, instance, network_info,
+                                        block_device_info=block_device_info,
+                                        vifs_already_plugged=True)
         self._prepare_pci_devices_for_use(
             pci_manager.get_instance_pci_devs(instance, 'all'))
 
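A minimal runnable sketch (plain Python, not Nova code) of the situation the restored NOTE(melwitt) comment describes: during a hard reboot the network info cache is stale, so any filter based on its 'active' flags is unreliable, and vifs_already_plugged=True is used to skip event waiting altogether.

def events_to_wait_for(network_info, vifs_already_plugged):
    # vifs_already_plugged=True means "do not wait for neutron plug events".
    if vifs_already_plugged:
        return []
    return [('network-vif-plugged', vif['id'])
            for vif in network_info if vif.get('active', True) is False]

# Stale cache: the vif was really unplugged during the reboot, but the
# cached entry still says active=True, so waiting would rest on bad data.
stale_cache = [{'id': '1', 'active': True}]
assert events_to_wait_for(stale_cache, vifs_already_plugged=True) == []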
@@ -5503,25 +5508,14 @@ class LibvirtDriver(driver.ComputeDriver):
             if CONF.vif_plugging_is_fatal:
                 raise exception.VirtualInterfaceCreateException()
 
-    def _get_neutron_events(self, network_info, reboot=False):
-        def eventable(vif):
-            if reboot:
-                # NOTE(melwitt): We won't expect events for the bridge vif
-                # type during a reboot because the neutron agent might not
-                # detect that we have unplugged and plugged vifs with os-vif.
-                # We also disregard the 'active' status of the vif during a
-                # reboot because the stale network_info we get from the compute
-                # manager won't show active=False for the vifs we've unplugged.
-                return vif.get('type') != network_model.VIF_TYPE_BRIDGE
-            # NOTE(danms): We need to collect any VIFs that are currently
-            # down that we expect a down->up event for. Anything that is
-            # already up will not undergo that transition, and for
-            # anything that might be stale (cache-wise) assume it's
-            # already up so we don't block on it.
-            return vif.get('active', True) is False
-
+    def _get_neutron_events(self, network_info):
+        # NOTE(danms): We need to collect any VIFs that are currently
+        # down that we expect a down->up event for. Anything that is
+        # already up will not undergo that transition, and for
+        # anything that might be stale (cache-wise) assume it's
+        # already up so we don't block on it.
         return [('network-vif-plugged', vif['id'])
-                for vif in network_info if eventable(vif)]
+                for vif in network_info if vif.get('active', True) is False]
 
     def _cleanup_failed_start(self, context, instance, network_info,
                               block_device_info, guest, destroy_disks):
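The restored filter keys purely off the cached 'active' flag. A self-contained illustration with plain dicts standing in for network_model.VIF objects (whose constructor defaults may differ):

def get_neutron_events(network_info):
    # Collect only VIFs that are explicitly down; anything already up,
    # or of unknown (possibly stale) status, is assumed up so we don't
    # block waiting on an event that will never arrive.
    return [('network-vif-plugged', vif['id'])
            for vif in network_info if vif.get('active', True) is False]

network_info = [{'id': '1', 'active': False},  # down -> expect an event
                {'id': '2', 'active': True},   # already up -> no event
                {'id': '3'}]                   # unknown -> assumed up
assert get_neutron_events(network_info) == [('network-vif-plugged', '1')]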
@@ -5537,33 +5531,14 @@ class LibvirtDriver(driver.ComputeDriver):
                                   block_device_info=None, power_on=True,
                                   vifs_already_plugged=False,
                                   post_xml_callback=None,
-                                  destroy_disks_on_failure=False,
-                                  reboot=False):
-
-        """Do required network setup and create domain.
-
-        :param context: nova.context.RequestContext for volume API calls
-        :param xml: Guest domain XML
-        :param instance: nova.objects.Instance object
-        :param network_info: nova.network.model.NetworkInfo for the instance
-        :param block_device_info: Legacy block device info dict
-        :param power_on: Whether to power on the guest after creating the XML
-        :param vifs_already_plugged: False means "wait for neutron plug events"
-                                     if using neutron, qemu/kvm, power_on=True,
-                                     and CONF.vif_plugging_timeout configured
-        :param post_xml_callback: Optional callback to call after creating the
-                                  guest domain XML
-        :param destroy_disks_on_failure: Whether to destroy the disks if we
-                                         fail during guest domain creation
-        :param reboot: Whether or not this is being called during a reboot. If
-                       we are rebooting, we will need to handle waiting for
-                       neutron plug events differently
-        """
+                                  destroy_disks_on_failure=False):
+        """Do required network setup and create domain."""
         timeout = CONF.vif_plugging_timeout
         if (self._conn_supports_start_paused and
                 utils.is_neutron() and not
                 vifs_already_plugged and power_on and timeout):
-            events = self._get_neutron_events(network_info, reboot=reboot)
+            events = self._get_neutron_events(network_info)
         else:
             events = []
 
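Putting the pieces together, a hedged sketch of the whole guard (boolean stand-ins replace CONF.vif_plugging_timeout, utils.is_neutron(), and the connection capability check, which are Nova internals not shown in this diff):

def should_wait_for_plug_events(conn_supports_start_paused, using_neutron,
                                vifs_already_plugged, power_on, timeout):
    # Events are only worth waiting for when the guest can be started
    # paused, neutron is in use, the vifs still need plugging, the guest
    # will be powered on, and a vif plugging timeout is configured.
    return (conn_supports_start_paused and using_neutron and
            not vifs_already_plugged and power_on and bool(timeout))

# Hard reboot after this revert: vifs_already_plugged=True, so no waiting.
assert not should_wait_for_plug_events(True, True, True, True, 300)
# Normal spawn: vifs not yet plugged, so the driver will wait for events.
assert should_wait_for_plug_events(True, True, False, True, 300)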